---
# mergekit SLERP merge: interpolates two Llama-3.1-70B finetunes.
# The base model anchors the merge; `t` controls the per-layer blend.
models:
  - model: Sao10K/L3.1-70B-Hanami-x1
  - model: TareksLab/Anathema-V7-LLaMA-70B
merge_method: slerp
base_model: Sao10K/L3.1-70B-Hanami-x1
parameters:
  # Interpolation factor per layer-depth gradient: 0 = pure base model,
  # 1 = pure secondary model. This curve favors the base at the outer
  # layers and blends most strongly (0.6) in the middle layers.
  t: [0.1, 0.2, 0.4, 0.6, 0.6, 0.4, 0.2, 0.1]
dtype: bfloat16
# Take the tokenizer from the base model rather than merging vocabularies.
tokenizer_source: base