Commit 1e72471

fix potential duplicate issue
1 parent 62ba7f7 · commit 1e72471

File tree

3 files changed · +26 −15 lines changed

  lora.hpp
  model.cpp
  model.h

lora.hpp

Lines changed: 4 additions & 4 deletions

```diff
@@ -1,8 +1,8 @@
 #ifndef __LORA_HPP__
 #define __LORA_HPP__
 
-#include "ggml_extend.hpp"
 #include <mutex>
+#include "ggml_extend.hpp"
 
 #define LORA_GRAPH_BASE_SIZE 10240
 
@@ -157,13 +157,13 @@ struct LoraModel : public GGMLRunner {
         model_loader.load_tensors(on_new_tensor_cb, n_threads);
 
         for (const auto& pair : tensors_to_create) {
-            const auto& name = pair.first;
-            const auto& ts = pair.second;
+            const auto& name = pair.first;
+            const auto& ts = pair.second;
             struct ggml_tensor* real = ggml_new_tensor(params_ctx,
                                                        ts.type,
                                                        ts.n_dims,
                                                        ts.ne);
-            lora_tensors[name] = real;
+            lora_tensors[name] = real;
         }
 
         alloc_params_buffer();
```

The −/+ pairs in the second hunk re-indent unchanged text; the only substantive change in this file is moving the `ggml_extend.hpp` include after `<mutex>`.
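For context, the loop above creates bare tensor metadata in `params_ctx`, and `alloc_params_buffer()` (a `GGMLRunner` helper) later allocates the backing memory. Below is a rough standalone sketch of that create-then-allocate flow using the raw ggml API; header locations and the exact allocation helper vary across ggml versions, so treat this as an assumption-laden outline, not the project's actual implementation:

```cpp
#include <cstdint>
#include <map>
#include <string>
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"

int main() {
    // no_alloc = true: ggml_new_tensor() creates metadata only;
    // data buffers are allocated in one shot afterwards.
    ggml_init_params ip = {
        /*.mem_size   =*/ ggml_tensor_overhead() * 8,
        /*.mem_buffer =*/ nullptr,
        /*.no_alloc   =*/ true,
    };
    ggml_context* params_ctx = ggml_init(ip);

    std::map<std::string, ggml_tensor*> lora_tensors;
    int64_t ne[2] = {16, 32};  // hypothetical shape, for illustration only
    lora_tensors["lora.up.weight"] =
        ggml_new_tensor(params_ctx, GGML_TYPE_F32, 2, ne);

    // Allocate backing memory for every tensor in the context at once --
    // roughly what alloc_params_buffer() does for the runner's backend.
    ggml_backend_t backend = ggml_backend_cpu_init();
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(params_ctx, backend);

    ggml_backend_buffer_free(buf);
    ggml_backend_free(backend);
    ggml_free(params_ctx);
    return 0;
}
```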

model.cpp

Lines changed: 21 additions & 10 deletions

```diff
@@ -1982,8 +1982,13 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
     std::vector<TensorStorage> processed_tensor_storages;
 
     {
-        std::unordered_map<std::string, TensorStorage> processed_map;
-        std::mutex map_mutex;
+        struct IndexedStorage {
+            size_t index;
+            TensorStorage ts;
+        };
+
+        std::mutex vec_mutex;
+        std::vector<IndexedStorage> all_results;
 
         int n_threads = std::min(num_threads_to_use, (int)tensor_storages.size());
         if (n_threads < 1) {
@@ -1993,7 +1998,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
         for (int i = 0; i < n_threads; ++i) {
             workers.emplace_back([&, thread_id = i]() {
-                std::unordered_map<std::string, TensorStorage> local_processed_map;
+                std::vector<IndexedStorage> local_results;
                 std::vector<TensorStorage> temp_storages;
 
                 for (size_t j = thread_id; j < tensor_storages.size(); j += n_threads) {
@@ -2006,23 +2011,29 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
                     preprocess_tensor(tensor_storage, temp_storages);
 
                     for (const auto& ts : temp_storages) {
-                        local_processed_map[ts.name] = ts;
+                        local_results.push_back({j, ts});
                     }
                 }
 
-                if (!local_processed_map.empty()) {
-                    std::lock_guard<std::mutex> lock(map_mutex);
-                    processed_map.merge(local_processed_map);
+                if (!local_results.empty()) {
+                    std::lock_guard<std::mutex> lock(vec_mutex);
+                    all_results.insert(all_results.end(),
+                                       local_results.begin(), local_results.end());
                 }
             });
         }
         for (auto& w : workers) {
             w.join();
         }
 
-        processed_tensor_storages.reserve(processed_map.size());
-        for (auto const& [name, ts] : processed_map) {
-            processed_tensor_storages.push_back(ts);
+        std::unordered_map<std::string, IndexedStorage> latest_map;
+        for (auto& entry : all_results) {
+            latest_map[entry.ts.name] = entry;
+        }
+
+        processed_tensor_storages.reserve(latest_map.size());
+        for (auto& [name, entry] : latest_map) {
+            processed_tensor_storages.push_back(entry.ts);
         }
     }
```
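The heart of the fix is in the last hunk. `std::unordered_map::merge` transfers only those elements whose keys are absent from the destination, so when two workers produced tensor storages with the same name, the surviving copy depended on which thread acquired the lock first. The new code instead tags every result with its original index `j`, appends everything to one shared vector, and deduplicates in a single pass after the workers join. Here is a minimal, self-contained sketch of this collect-then-deduplicate pattern; the names `Item`, `input`, and `payload` are illustrative placeholders, not identifiers from the repository:

```cpp
#include <cstddef>
#include <iostream>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

// Placeholder for a processed result; TensorStorage plays this role above.
struct Item {
    std::size_t index;  // position in the original input, recorded per result
    std::string name;   // deduplication key (ts.name above)
    int payload;
};

int main() {
    // Input with duplicate names, split across workers by index stride.
    std::vector<std::string> input = {"a", "b", "a", "c", "b"};

    std::mutex vec_mutex;
    std::vector<Item> all_results;

    int n_threads = 2;
    std::vector<std::thread> workers;
    for (int i = 0; i < n_threads; ++i) {
        workers.emplace_back([&, thread_id = i]() {
            std::vector<Item> local_results;  // lock-free hot path
            for (std::size_t j = thread_id; j < input.size(); j += n_threads) {
                local_results.push_back({j, input[j], static_cast<int>(j) * 10});
            }
            // One short critical section per worker to publish results.
            std::lock_guard<std::mutex> lock(vec_mutex);
            all_results.insert(all_results.end(),
                               local_results.begin(), local_results.end());
        });
    }
    for (auto& w : workers) {
        w.join();
    }

    // Single-pass deduplication after the join: a later assignment to the
    // same key overwrites the earlier one, so each name survives exactly once.
    std::unordered_map<std::string, Item> latest;
    for (const auto& item : all_results) {
        latest[item.name] = item;
    }

    for (const auto& [name, item] : latest) {
        std::cout << name << " -> input index " << item.index << "\n";
    }
}
```

Which duplicate wins in `latest` still follows insertion order into the shared vector; carrying the original index makes it cheap to switch to an order-independent rule (for example, keeping the entry with the largest index), which may be why the index is recorded.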

model.h

Lines changed: 1 addition & 1 deletion

```diff
@@ -250,7 +250,7 @@ class ModelLoader {
     bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads = 0);
     bool load_tensors(std::map<std::string, struct ggml_tensor*>& tensors,
                       std::set<std::string> ignore_tensors = {},
-                      int n_threads = 0);
+                      int n_threads = 0);
 
     bool save_to_gguf_file(const std::string& file_path, ggml_type type, const std::string& tensor_type_rules);
     bool tensor_should_be_converted(const TensorStorage& tensor_storage, ggml_type type);
```

This change re-indents the continuation line of the second overload only; the text itself is unchanged.
