Skip to content

Commit

Permalink
feat: add flux model (#132)
Browse files Browse the repository at this point in the history
* first commit

* add flux impl

* clean up the code

* add changes

* add logging

* add more detailed logging

* fix runtime issues

* add changes
  • Loading branch information
jorgeantonio21 authored Sep 2, 2024
1 parent 614c652 commit 78555e6
Show file tree
Hide file tree
Showing 6 changed files with 465 additions and 9 deletions.
14 changes: 11 additions & 3 deletions atoma-inference/src/model_thread.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,9 @@ use crate::models::candle::mixtral_nccl::MixtralNcclModel;

use crate::models::{
candle::{
falcon::FalconModel, llama::LlamaModel, mamba::MambaModel, mistral::MistralModel,
mixtral::MixtralModel, phi3::Phi3Model, quantized::QuantizedModel, qwen::QwenModel,
stable_diffusion::StableDiffusion,
falcon::FalconModel, flux::Flux, llama::LlamaModel, mamba::MambaModel,
mistral::MistralModel, mixtral::MixtralModel, phi3::Phi3Model, quantized::QuantizedModel,
qwen::QwenModel, stable_diffusion::StableDiffusion,
},
config::{ModelConfig, ModelsConfig},
types::{LlmOutput, ModelType},
Expand Down Expand Up @@ -288,6 +288,14 @@ pub(crate) fn dispatch_model_thread(
stream_tx,
)
}
ModelType::FluxSchnell | ModelType::FluxDev => spawn_model_thread::<Flux>(
model_name,
api_key.clone(),
cache_dir.clone(),
model_config,
model_receiver,
stream_tx,
),
ModelType::LlamaV1
| ModelType::LlamaV2
| ModelType::LlamaTinyLlama1_1BChat
Expand Down
Loading

0 comments on commit 78555e6

Please sign in to comment.