From 5470a4fce3fedf097ecd7d28fdd83a770a692d67 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Mon, 24 Jul 2023 10:51:24 -0700 Subject: [PATCH] [docs] Other modalities (#4205) remove coming soon, rl pipeline --- docs/source/en/_toctree.yml | 10 ++---- docs/source/en/api/experimental/rl.mdx | 15 --------- .../api/pipelines/value_guided_sampling.mdx | 32 +++++++++++++++++++ docs/source/en/using-diffusers/audio.mdx | 16 ---------- docs/source/en/using-diffusers/rl.mdx | 25 --------------- .../experimental/rl/value_guided_sampling.py | 16 ++++++---- 6 files changed, 43 insertions(+), 71 deletions(-) delete mode 100644 docs/source/en/api/experimental/rl.mdx create mode 100644 docs/source/en/api/pipelines/value_guided_sampling.mdx delete mode 100644 docs/source/en/using-diffusers/audio.mdx delete mode 100644 docs/source/en/using-diffusers/rl.mdx diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 3dc17d6202..a75e2b7112 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -88,10 +88,6 @@ title: Custom Diffusion title: Training - sections: - - local: using-diffusers/rl - title: Reinforcement Learning - - local: using-diffusers/audio - title: Audio - local: using-diffusers/other-modalities title: Other Modalities title: Taking Diffusers Beyond Images @@ -276,6 +272,8 @@ title: Unconditional Latent Diffusion - local: api/pipelines/unidiffuser title: UniDiffuser + - local: api/pipelines/value_guided_sampling + title: Value-guided sampling - local: api/pipelines/versatile_diffusion title: Versatile Diffusion - local: api/pipelines/vq_diffusion @@ -331,8 +329,4 @@ - local: api/schedulers/vq_diffusion title: VQDiffusionScheduler title: Schedulers - - sections: - - local: api/experimental/rl - title: RL Planning - title: Experimental Features title: API diff --git a/docs/source/en/api/experimental/rl.mdx b/docs/source/en/api/experimental/rl.mdx deleted file mode 100644 index 66c8db311b..0000000000 --- a/docs/source/en/api/experimental/rl.mdx +++ /dev/null @@ -1,15 +0,0 @@ - - -# TODO - -Coming soon! \ No newline at end of file diff --git a/docs/source/en/api/pipelines/value_guided_sampling.mdx b/docs/source/en/api/pipelines/value_guided_sampling.mdx new file mode 100644 index 0000000000..0509b196b5 --- /dev/null +++ b/docs/source/en/api/pipelines/value_guided_sampling.mdx @@ -0,0 +1,32 @@ + + +# Value-guided planning + + + +🧪 This is an experimental pipeline for reinforcement learning! + + + +This pipeline is based on the [Planning with Diffusion for Flexible Behavior Synthesis](https://huggingface.co/papers/2205.09991) paper by Michael Janner, Yilun Du, Joshua B. Tenenbaum, Sergey Levine. + +The abstract from the paper is: + +*Model-based reinforcement learning methods often use learning only for the purpose of estimating an approximate dynamics model, offloading the rest of the decision-making work to classical trajectory optimizers. While conceptually simple, this combination has a number of empirical shortcomings, suggesting that learned models may not be well-suited to standard trajectory optimization. In this paper, we consider what it would look like to fold as much of the trajectory optimization pipeline as possible into the modeling problem, such that sampling from the model and planning with it become nearly identical. The core of our technical approach lies in a diffusion probabilistic model that plans by iteratively denoising trajectories. 
We show how classifier-guided sampling and image inpainting can be reinterpreted as coherent planning strategies, explore the unusual and useful properties of diffusion-based planning methods, and demonstrate the effectiveness of our framework in control settings that emphasize long-horizon decision-making and test-time flexibility*.
+
+You can find additional information about the model on the [project page](https://diffusion-planning.github.io/), the [original codebase](https://github.com/jannerm/diffuser), or try it out in a demo [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_with_diffusers.ipynb).
+
+The script to run the model is available [here](https://github.com/huggingface/diffusers/tree/main/examples/reinforcement_learning).
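+
+The sketch below, adapted from that example script, shows roughly how the pipeline is driven. It assumes `gym` and `d4rl` are installed (importing `d4rl` registers the D4RL environments) and uses the community Hopper checkpoint from the script, so treat the model ID and the loop settings as placeholders for your own setup.
+
+```python
+import d4rl  # noqa: F401, registers the D4RL environments with gym
+import gym
+
+from diffusers.experimental import ValueGuidedRLPipeline
+
+# Create the environment the planner was trained on and load the pipeline,
+# which bundles the diffusion model, the value function, and the scheduler.
+env = gym.make("hopper-medium-v2")
+pipeline = ValueGuidedRLPipeline.from_pretrained(
+    "bglick13/hopper-medium-v2-value-function-hor32",
+    env=env,
+)
+
+obs = env.reset()
+for _ in range(100):
+    # Plan a batch of trajectories from the current observation and
+    # return the first (denormalized) action of the highest-value one.
+    action = pipeline(obs, planning_horizon=32)
+    obs, reward, done, _ = env.step(action)
+    if done:
+        break
+```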
+
+## ValueGuidedRLPipeline
+[[autodoc]] diffusers.experimental.ValueGuidedRLPipeline
\ No newline at end of file
diff --git a/docs/source/en/using-diffusers/audio.mdx b/docs/source/en/using-diffusers/audio.mdx
deleted file mode 100644
index e1d669882f..0000000000
--- a/docs/source/en/using-diffusers/audio.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-# Using Diffusers for audio
-
-[`DanceDiffusionPipeline`] and [`AudioDiffusionPipeline`] can be used to generate
-audio rapidly! More coming soon!
\ No newline at end of file
diff --git a/docs/source/en/using-diffusers/rl.mdx b/docs/source/en/using-diffusers/rl.mdx
deleted file mode 100644
index 0cbf46b2a3..0000000000
--- a/docs/source/en/using-diffusers/rl.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-# Using Diffusers for reinforcement learning
-
-Support for one RL model and related pipelines is included in the `experimental` source of diffusers.
-More models and examples coming soon!
-
-# Diffuser Value-guided Planning
-
-You can run the model from [*Planning with Diffusion for Flexible Behavior Synthesis*](https://arxiv.org/abs/2205.09991) with Diffusers.
-The script is located in the [RL Examples](https://github.com/huggingface/diffusers/tree/main/examples/rl) folder.
-
-Or, run this example in Colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_with_diffusers.ipynb)
-
-[[autodoc]] diffusers.experimental.ValueGuidedRLPipeline
\ No newline at end of file
diff --git a/src/diffusers/experimental/rl/value_guided_sampling.py b/src/diffusers/experimental/rl/value_guided_sampling.py
index e4af4986fa..e58952aa20 100644
--- a/src/diffusers/experimental/rl/value_guided_sampling.py
+++ b/src/diffusers/experimental/rl/value_guided_sampling.py
@@ -24,19 +24,21 @@ from ...utils.dummy_pt_objects import DDPMScheduler

 class ValueGuidedRLPipeline(DiffusionPipeline):
     r"""
-    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-    Pipeline for sampling actions from a diffusion model trained to predict sequences of states.
+    Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.

-    Original implementation inspired by this repository: https://github.com/jannerm/diffuser.
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
+    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

     Parameters:
-        value_function ([`UNet1DModel`]): A specialized UNet for fine-tuning trajectories base on reward.
-        unet ([`UNet1DModel`]): U-Net architecture to denoise the encoded trajectories.
+        value_function ([`UNet1DModel`]):
+            A specialized UNet for fine-tuning trajectories based on reward.
+        unet ([`UNet1DModel`]):
+            UNet architecture to denoise the encoded trajectories.
         scheduler ([`SchedulerMixin`]):
             A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this
             application is [`DDPMScheduler`].
-        env: An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models.
+        env:
+            An environment following the OpenAI gym API to act in. For now, only Hopper has pretrained models.
     """

     def __init__(