ggml : resolve merge conflicts (ggml/0)
ggml-backend.c  +8 -8
@@ -1041,7 +1041,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
     for (int i = 0; i < GGML_MAX_SRC; i++) {
         const struct ggml_tensor * src = tensor->src[i];
         if (src == NULL) {
-
+            continue;
         }
         if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
             int src_backend = ggml_backend_sched_backend_from_buffer(sched, src->buffer);
@@ -1088,7 +1088,7 @@ static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, str
     for (int j = 0; j < GGML_MAX_SRC; j++) {
         struct ggml_tensor * src = node->src[j];
         if (src == NULL) {
-
+            continue;
         }
         ggml_backend_t src_backend = tensor_backend(src);
         fprintf(stderr, " %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
@@ -1144,7 +1144,7 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
     for (int j = 0; j < GGML_MAX_SRC; j++) {
         struct ggml_tensor * src = node->src[j];
         if (src == NULL) {
-
+            continue;
         }
         if (tensor_backend_id(src) == -1) {
             tensor_backend_id(src) = ggml_backend_sched_backend_id_from_cur(sched, src);
@@ -1256,7 +1256,7 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
     for (int j = 0; j < GGML_MAX_SRC; j++) {
         struct ggml_tensor * src = node->src[j];
         if (src == NULL) {
-
+            continue;
         }
         int src_backend_id = tensor_backend_id(src);
         if (src_backend_id == -1) {
@@ -1315,7 +1315,7 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
     for (int j = 0; j < GGML_MAX_SRC; j++) {
         struct ggml_tensor * src = node->src[j];
         if (src == NULL) {
-
+            continue;
         }
         int src_backend_id = tensor_backend_id(src);
         assert(src_backend_id != -1); // all inputs should be assigned by now
@@ -1362,7 +1362,7 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
     for (int j = 0; j < GGML_MAX_SRC; j++) {
         struct ggml_tensor * src = node->src[j];
         if (src == NULL) {
-
+            continue;
         }
         ggml_backend_t src_backend = tensor_backend(src);
         if (src_backend != tensor_backend /* && src_backend != NULL */) {
@@ -1668,7 +1668,7 @@ static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set,
     for (int i = 0; i < GGML_MAX_SRC; i++) {
         struct ggml_tensor * s = src->src[i];
         if (s == NULL) {
-
+            continue;
         }
         dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s);
     }
@@ -1697,7 +1697,7 @@ static void graph_copy_init_tensor(struct ggml_hash_set hash_set, struct ggml_te
     for (int i = 0; i < GGML_MAX_SRC; i++) {
         struct ggml_tensor * s = src->src[i];
         if (s == NULL) {
-
+            continue;
         }
         graph_copy_init_tensor(hash_set, node_copies, node_init, s);
     }
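For context: every hunk restores a `continue;` inside the `if (src == NULL)` check, which the merge had left with an empty body. `tensor->src` is a fixed-size array of `GGML_MAX_SRC` slots where unused slots are NULL, so with an empty body the loop would fall through and dereference the NULL pointer on the very next line (`src->buffer`, `tensor_backend(src)`, and so on). The standalone sketch below illustrates that pattern; the types and names are hypothetical stand-ins, not ggml's API, and the assumption that NULL slots may appear anywhere in the array is inferred from the use of `continue` rather than `break`:

/* Minimal sketch (hypothetical types/names) of why the NULL check
 * needs `continue;` rather than an empty body. */
#include <stdio.h>
#include <stddef.h>

#define MAX_SRC 4

struct tensor {
    const char    *name;
    struct tensor *src[MAX_SRC]; /* unused slots stay NULL */
};

int main(void) {
    struct tensor a = { "a", { NULL } };
    struct tensor b = { "b", { NULL } };
    /* slot 1 is deliberately NULL: used slots need not be contiguous */
    struct tensor node = { "node", { &a, NULL, &b, NULL } };

    for (int i = 0; i < MAX_SRC; i++) {
        struct tensor *src = node.src[i];
        if (src == NULL) {
            continue; /* with an empty body here, src->name below would
                         dereference NULL on slot 1 */
        }
        printf("src[%d] = %s\n", i, src->name);
    }
    return 0;
}

Compiled and run as-is, this prints src[0] and src[2] and skips the two NULL slots; deleting the `continue;` reproduces the crash the patch guards against.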