Spaces:
Running
Running
ggml : add build-time message to remind about ggml_set_rows (llama/14661)
Browse files
ggml/src/ggml-cann/ggml-cann.cpp
CHANGED
|
@@ -2090,6 +2090,7 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev,
             {
                 // TODO: add support
                 // ref: https://github.com/ggml-org/llama.cpp/pull/14274
+#pragma message("TODO: implement F32, F16, BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL support (https://github.com/ggml-org/llama.cpp/pull/14661)")
                 return false;
             } break;
         case GGML_OP_CPY: {
|
ggml/src/ggml-cuda/ggml-cuda.cu
CHANGED
|
@@ -3222,6 +3222,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
             } break;
         case GGML_OP_SET_ROWS:
             {
+#pragma message("TODO: implement BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL support (https://github.com/ggml-org/llama.cpp/pull/14661)")
                 return (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) &&
                        op->src[0]->type == GGML_TYPE_F32 &&
                        op->src[1]->type == GGML_TYPE_I64;
|
ggml/src/ggml-opencl/ggml-opencl.cpp
CHANGED
|
@@ -2280,6 +2280,7 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
            {
                // TODO: add support
                // ref: https://github.com/ggml-org/llama.cpp/pull/14274
+#pragma message("TODO: implement BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL support (https://github.com/ggml-org/llama.cpp/pull/14661)")
                if (op->src[0]->type != GGML_TYPE_F32) {
                    return false;
                }
|
ggml/src/ggml-sycl/ggml-sycl.cpp
CHANGED
|
@@ -4303,6 +4303,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g
            {
                // TODO: add support
                // ref: https://github.com/ggml-org/llama.cpp/pull/14274
+#pragma message("TODO: implement BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL support (https://github.com/ggml-org/llama.cpp/pull/14661)")
                return (op->type == GGML_TYPE_F32 || (op->type == GGML_TYPE_F16 && op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_I64));
            } break;
        case GGML_OP_CPY: