From cd3cc1ce0ec85fb7060e59aa55ded563543e3fb7 Mon Sep 17 00:00:00 2001 From: laurent Date: Fri, 18 Jul 2025 08:27:30 +0200 Subject: [PATCH] Sketch a FAQ and add some issue templates. --- .github/ISSUE_TEMPLATE/bug.yml | 83 +++++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/question.yml | 40 ++++++++++++++ .gitignore | 1 + FAQ.md | 30 +++++++++++ README.md | 4 ++ 5 files changed, 158 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug.yml create mode 100644 .github/ISSUE_TEMPLATE/question.yml create mode 100644 FAQ.md diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 0000000..444751a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,83 @@ +name: Bug Report +description: You found a bug. +labels: ["bug", "triage"] +body: + - type: markdown + attributes: + value: | + Please first check the [FAQ](https://github.com/kyutai-labs/delayed-streams-modeling/blob/main/FAQ.md). + - type: dropdown + id: backend + attributes: + label: Backend impacted + description: Which backend is concerned with your bug report? + options: + - The PyTorch implementation + - The MLX implementation + - The Rust implementation + - Other / All + default: 0 + validations: + required: true + - type: dropdown + id: os + attributes: + label: Operating system + description: What is your operating system? + options: + - Linux + - Mac OS X + - Windows (unsupported) + default: 0 + validations: + required: true + - type: dropdown + id: hardware + attributes: + label: Hardware + description: What hardware are you using? + options: + - CPU + - GPU with CUDA + - Metal with MLX + default: 0 + validations: + required: true + - type: textarea + id: description + attributes: + label: Description + description: Provide a detailed description of your bug. 
+ placeholder: + value: + validations: + required: true + - type: textarea + id: more_info + attributes: + label: Extra information + description: Please provide any other relevant information, such as log extracts, code etc. + placeholder: + value: + validations: + required: true + - type: textarea + id: env + attributes: + label: Environment + description: Please describe your environment, filling in the template below. + placeholder: + value: | + Fill in the following information on your system. + - Operating system version: + + If the backend impacted is PyTorch: + - Python version: + - PyTorch version: + - CUDA version (run `python -c 'import torch; print(torch.version.cuda)'`): + - GPU model and memory: + + If the backend is MLX: + - Mac model: + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml new file mode 100644 index 0000000..272307c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.yml @@ -0,0 +1,40 @@ +name: Question +description: You have a question about the codebase, the paper, or the implementation. +labels: ["question", "triage"] +body: + - type: markdown + attributes: + value: | + Please first check the [FAQ](https://github.com/kyutai-labs/delayed-streams-modeling/blob/main/FAQ.md). + - type: checkboxes + id: terms + attributes: + label: Due diligence + description: Have you searched the existing issues / FAQ / Google / asked ChatGPT? + options: + - label: I have done my due diligence in trying to find the answer myself. + required: true + + - type: dropdown + id: backend + attributes: + label: Topic + description: What is your question about? + options: + - The paper + - The PyTorch implementation + - The MLX implementation + - The Rust implementation + - Other / All + default: 0 + validations: + required: true + - type: textarea + id: question + attributes: + label: Question + description: What is your question? + placeholder: Your question.
Please make sure this is directly related to our codebase. We will not provide support for installing PyTorch, CUDA, Rust etc. + value: + validations: + required: true diff --git a/.gitignore b/.gitignore index ba90038..e771139 100644 --- a/.gitignore +++ b/.gitignore @@ -192,3 +192,4 @@ cython_debug/ # refer to https://docs.cursor.com/context/ignore-files .cursorignore .cursorindexingignore +out*.wav diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 0000000..39ad77b --- /dev/null +++ b/FAQ.md @@ -0,0 +1,30 @@ +# FAQ + +Here are the answers to a number of frequently asked questions. + +### Torch Compilation Errors + +With some PyTorch/triton versions, one might encounter compilation errors +like the following: +``` + Traceback (most recent call last): + ... + File "site-packages/torch/_inductor/runtime/triton_heuristics.py", line 1153, in make_launcher + "launch_enter_hook": binary.__class__.launch_enter_hook, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +torch._inductor.exc.InductorError: AttributeError: type object 'CompiledKernel' has no attribute 'launch_enter_hook' +``` + +If that's the case, you can disable torch compilation by setting the following +environment variable. +```bash +export NO_TORCH_COMPILE=1 +``` + +### Will you release training code? + +Some finetuning code can be found in the [kyutai-labs/moshi-finetune repo](https://github.com/kyutai-labs/moshi-finetune). +This code has not been adapted to the Speech-To-Text and Text-To-Speech models +yet, but it should be a good starting point. + + diff --git a/README.md b/README.md index 8df1709..4d4f5d3 100644 --- a/README.md +++ b/README.md @@ -305,6 +305,10 @@ If you have [uv](https://docs.astral.sh/uv/) installed, you can skip the install and just prefix the command above with `uvx --with moshi-mlx`. +## FAQ + +Check out the [Frequently Asked Questions](FAQ.md) section before opening an issue.
+ ## License The present code is provided under the MIT license for the Python parts, and Apache license for the Rust backend.