diff --git a/plugins/by-name/llm/default.nix b/plugins/by-name/llm/default.nix
new file mode 100644
index 00000000..9236b488
--- /dev/null
+++ b/plugins/by-name/llm/default.nix
@@ -0,0 +1,52 @@
+{ lib, pkgs, ... }:
+lib.nixvim.plugins.mkNeovimPlugin {
+  name = "llm";
+  packPathName = "llm.nvim";
+  package = "llm-nvim";
+
+  maintainers = [ lib.maintainers.GaetanLepage ];
+
+  extraOptions = {
+    llmLsPackage = lib.mkPackageOption pkgs "llm-ls" {
+      nullable = true;
+    };
+  };
+  extraConfig = cfg: {
+    extraPackages = [ cfg.llmLsPackage ];
+
+    # If this option is not set, llm.nvim will try to download the llm-ls binary from the internet.
+    plugins.llm.settings.lsp.bin_path = lib.mkIf (cfg.llmLsPackage != null) (
+      lib.mkDefault (lib.getExe cfg.llmLsPackage)
+    );
+  };
+
+  settingsOptions = import ./settings-options.nix lib;
+
+  settingsExample = {
+    max_tokens = 1024;
+    url = "https://open.bigmodel.cn/api/paas/v4/chat/completions";
+    model = "glm-4-flash";
+    prefix = {
+      user = {
+        text = "😃 ";
+        hl = "Title";
+      };
+      assistant = {
+        text = "⚡ ";
+        hl = "Added";
+      };
+    };
+    save_session = true;
+    max_history = 15;
+    keys = {
+      "Input:Submit" = {
+        mode = "n";
+        key = "<cr>";
+      };
+      "Input:Cancel" = {
+        mode = "n";
+        key = "<C-c>";
+      };
+    };
+  };
+}
diff --git a/plugins/by-name/llm/settings-options.nix b/plugins/by-name/llm/settings-options.nix
new file mode 100644
index 00000000..437931ea
--- /dev/null
+++ b/plugins/by-name/llm/settings-options.nix
@@ -0,0 +1,214 @@
+lib:
+let
+  inherit (lib) types mkOption;
+  inherit (lib.nixvim)
+    defaultNullOpts
+    mkNullOrOption'
+    mkNullOrStr
+    mkNullOrStr'
+    ;
+in
+{
+  api_token = mkNullOrStr ''
+    Token for authenticating with the backend provider.
+
+    When `api_token` is set, it will be passed as a header: `Authorization: Bearer <api_token>`.
+  '';
+
+  model = mkNullOrStr' {
+    example = "bigcode/starcoder2-15b";
+    description = ''
+      The model ID; behavior depends on the backend.
+    '';
+  };
+
+  backend = defaultNullOpts.mkStr "huggingface" ''
+    Which backend to use for inference.
+  '';
+
+  url = defaultNullOpts.mkStr null ''
+    The HTTP URL of the backend.
+  '';
+
+  tokens_to_clear = defaultNullOpts.mkListOf types.str [ "<|endoftext|>" ] ''
+    List of tokens to remove from the model's output.
+  '';
+
+  request_body = {
+    parameters = mkNullOrOption' {
+      type = with types; attrsOf anything;
+      example = {
+        temperature = 0.2;
+        top_p = 0.95;
+      };
+      description = ''
+        Parameters for the model.
+      '';
+    };
+  };
+
+  fim = {
+    enabled = defaultNullOpts.mkBool true ''
+      Set this if the model supports fill-in-the-middle.
+    '';
+
+    prefix = defaultNullOpts.mkStr "<fim_prefix>" ''
+      The beginning of the text sequence to fill.
+    '';
+
+    middle = defaultNullOpts.mkStr "<fim_middle>" ''
+      The missing or masked segment that the model should predict.
+    '';
+
+    suffix = defaultNullOpts.mkStr "<fim_suffix>" ''
+      The text following the missing section.
+    '';
+  };
+
+  debounce_ms = defaultNullOpts.mkUnsignedInt 150 ''
+    Time in ms to wait before updating.
+  '';
+
+  accept_keymap = defaultNullOpts.mkStr "<Tab>" ''
+    Keymap to accept the model suggestion.
+  '';
+
+  dismiss_keymap = defaultNullOpts.mkStr "<S-Tab>" ''
+    Keymap to dismiss the model suggestion.
+  '';
+
+  tls_skip_verify_insecure = defaultNullOpts.mkBool false ''
+    Whether to skip TLS verification when accessing the backend.
+  '';
+
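+  # These options map to llm.nvim's `lsp` table; `bin_path` is normally wired up by Nixvim itself (see default.nix).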
+  lsp = {
+    bin_path = mkNullOrOption' {
+      type = types.str;
+      defaultText = lib.literalExpression "lib.getExe config.plugins.llm.llmLsPackage";
+      description = ''
+        Path to the `llm-ls` binary.
+
+        If not set, llm.nvim will try to download the `llm-ls` binary from the internet.
+        As this will not work well with Nix, Nixvim sets this automatically for you.
+      '';
+    };
+
+    host = defaultNullOpts.mkStr null ''
+      You can also use `llm-ls` through TCP by providing a hostname.
+    '';
+
+    port = defaultNullOpts.mkUnsignedInt null ''
+      The port for connecting to a `llm-ls` TCP instance.
+    '';
+
+    cmd_env = defaultNullOpts.mkAttrsOf' {
+      type = types.anything;
+      pluginDefault = null;
+      example = {
+        LLM_LOG_LEVEL = "DEBUG";
+      };
+      description = ''
+        Use this option to set environment variables for the `llm-ls` process.
+      '';
+    };
+  };
+
+  tokenizer = defaultNullOpts.mkNullable' {
+    pluginDefault = null;
+    example.path = "/path/to/my/tokenizer.json";
+    description = ''
+      `llm-ls` uses tokenizers to make sure the prompt fits the `context_window`.
+
+      To configure it, you have a few options:
+      - No tokenization: `llm-ls` will count the number of characters instead.
+        Leave this option set to `null` (default).
+      - From a local file on your disk: set the `path` attribute.
+      - From a Hugging Face repository: `llm-ls` will attempt to download `tokenizer.json` at the root
+        of the repository.
+      - From an HTTP endpoint: `llm-ls` will attempt to download a file via an HTTP GET request.
+    '';
+    type =
+      let
+        localFile = types.submodule {
+          options = {
+            path = mkOption {
+              type = types.str;
+              example = "/path/to/my/tokenizer.json";
+            };
+          };
+        };
+
+        huggingFaceRepository = types.submodule {
+          options = {
+            repository = mkOption {
+              type = types.str;
+              example = "myusername/myrepo";
+              description = "Location of the repository.";
+            };
+
+            api_token = defaultNullOpts.mkStr null ''
+              Optional, in case the API token used for the backend is not the same.
+            '';
+          };
+        };
+
+        httpEndpoint = types.submodule {
+          options = {
+            url = mkOption {
+              type = types.str;
+              example = "https://my-endpoint.example.com/mytokenizer.json";
+              description = "URL of the HTTP endpoint.";
+            };
+
+            to = mkOption {
+              type = types.str;
+              example = "/download/path/of/mytokenizer.json";
+              description = "Download path.";
+            };
+          };
+        };
+      in
+      with types;
+      maybeRaw (oneOf [
+        localFile
+        huggingFaceRepository
+        httpEndpoint
+      ]);
+  };
+
+  context_window = defaultNullOpts.mkUnsignedInt 1024 ''
+    Size of the context window (in tokens).
+  '';
+
+  enable_suggestions_on_startup = defaultNullOpts.mkBool true ''
+    Lets you choose to enable or disable "suggest-as-you-type" suggestions on Neovim startup.
+
+    You can then toggle auto-suggest with `LLMToggleAutoSuggest`.
+  '';
+
+  enable_suggestions_on_files = defaultNullOpts.mkNullable' {
+    type = with types; maybeRaw (either str (listOf str));
+    pluginDefault = "*";
+    example = [
+      "*.py"
+      "*.rs"
+    ];
+    description = ''
+      Lets you enable suggestions only on specific files that match the pattern-matching syntax
+      you provide.
+
+      It can either be a string or a list of strings, for example:
+      - to match on all types of buffers: `"*"`
+      - to match on all files in my_project/: `"/path/to/my_project/*"`
+      - to match on all Python and Rust files: `[ "*.py" "*.rs" ]`
+    '';
+  };
+
+  disable_url_path_completion = defaultNullOpts.mkBool false ''
+    `llm-ls` will try to add the correct path to the URL to get completions if it does not already
+    end with said path.
+
+    You can disable this behavior by setting this option to `true`.
+  '';
+}
diff --git a/tests/test-sources/plugins/by-name/llm/default.nix b/tests/test-sources/plugins/by-name/llm/default.nix
new file mode 100644
index 00000000..975a596b
--- /dev/null
+++ b/tests/test-sources/plugins/by-name/llm/default.nix
@@ -0,0 +1,123 @@
+{
+  empty = {
+    plugins.llm.enable = true;
+  };
+
+  no-package = {
+    test.runNvim = false;
+
+    plugins.llm = {
+      enable = true;
+
+      llmLsPackage = null;
+      settings.lsp.bin_path = null;
+    };
+  };
+
+  defaults = {
+    plugins.llm = {
+      enable = true;
+
+      settings = {
+        api_token = null;
+        model = "bigcode/starcoder2-15b";
+        backend = "huggingface";
+        url = null;
+        tokens_to_clear = [ "<|endoftext|>" ];
+        request_body = {
+          parameters = {
+            max_new_tokens = 60;
+            temperature = 0.2;
+            top_p = 0.95;
+          };
+        };
+        fim = {
+          enabled = true;
+          prefix = "<fim_prefix>";
+          middle = "<fim_middle>";
+          suffix = "<fim_suffix>";
+        };
+        debounce_ms = 150;
+        accept_keymap = "<Tab>";
+        dismiss_keymap = "<S-Tab>";
+        tls_skip_verify_insecure = false;
+        lsp = {
+          host = null;
+          port = null;
+          cmd_env = null;
+        };
+        tokenizer = null;
+        context_window = 1024;
+        enable_suggestions_on_startup = true;
+        enable_suggestions_on_files = "*";
+        disable_url_path_completion = false;
+      };
+    };
+  };
+
+  example = {
+    plugins.llm = {
+      enable = true;
+
+      settings = {
+        max_tokens = 1024;
+        url = "https://open.bigmodel.cn/api/paas/v4/chat/completions";
+        model = "glm-4-flash";
+        prefix = {
+          user = {
+            text = "😃 ";
+            hl = "Title";
+          };
+          assistant = {
+            text = "⚡ ";
+            hl = "Added";
+          };
+        };
+        save_session = true;
+        max_history = 15;
+        keys = {
+          "Input:Submit" = {
+            mode = "n";
+            key = "<cr>";
+          };
+          "Input:Cancel" = {
+            mode = "n";
+            key = "<C-c>";
+          };
+          "Input:Resend" = {
+            mode = "n";
+            key = "<C-r>";
+          };
+          "Input:HistoryNext" = {
+            mode = "n";
+            key = "<C-j>";
+          };
+          "Input:HistoryPrev" = {
+            mode = "n";
+            key = "<C-k>";
+          };
+          "Output:Ask" = {
+            mode = "n";
+            key = "i";
+          };
+          "Output:Cancel" = {
+            mode = "n";
+            key = "<C-c>";
+          };
+          "Output:Resend" = {
+            mode = "n";
+            key = "<C-r>";
+          };
+          "Session:Toggle" = {
+            mode = "n";
+            key = "<leader>ac";
+          };
+          "Session:Close" = {
+            mode = "n";
+            key = "<esc>";
+          };
+        };
+      };
+    };
+  };
+}
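
Example usage, for reviewers: a minimal sketch of how a user would consume this module once merged, using only options declared above (the values mirror the plugin defaults documented in settings-options.nix, so they are illustrative rather than required):

```nix
{
  plugins.llm = {
    enable = true;

    settings = {
      # Inference backend and model, as declared in settings-options.nix.
      backend = "huggingface";
      model = "bigcode/starcoder2-15b";

      # Keymaps for handling inline suggestions.
      accept_keymap = "<Tab>";
      dismiss_keymap = "<S-Tab>";
    };
  };
}
```

With `llmLsPackage` left at its default, the `extraConfig` in default.nix wires `settings.lsp.bin_path` to the packaged `llm-ls` via `lib.mkDefault`, so the path can still be overridden explicitly.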