{
  lib,
  buildPythonPackage,
  fetchPypi,
  google-generativeai,
  llama-index-core,
  poetry-core,
  pythonOlder,
}:

buildPythonPackage rec {
  pname = "llama-index-embeddings-gemini";
  version = "0.3.1";
  pyproject = true;

  disabled = pythonOlder "3.9";

  src = fetchPypi {
    pname = "llama_index_embeddings_gemini";
    inherit version;
    hash = "sha256-Bt6NazogM8O5QDtrIN2uooKMCwzLseOU/v5H9e0hvEk=";
  };
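
  # Relax upstream's version constraint on google-generativeai so the build
  # accepts the version packaged in nixpkgs.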
  pythonRelaxDeps = [ "google-generativeai" ];

  build-system = [ poetry-core ];

  dependencies = [
    google-generativeai
    llama-index-core
  ];

  # Tests are only available in the mono repo
  doCheck = false;

  pythonImportsCheck = [ "llama_index.embeddings.gemini" ];

  meta = with lib; {
    description = "LlamaIndex Embeddings Integration for Gemini";
    homepage = "https://github.com/run-llama/llama_index/tree/main/llama-index-integrations/embeddings/llama-index-embeddings-gemini";
    license = licenses.mit;
    maintainers = with maintainers; [ fab ];
  };
}