v0.12.6 (#17305)
logan-markewich authored Dec 18, 2024
1 parent b24e6ea commit 6d770ae
Showing 11 changed files with 269 additions and 77 deletions.
94 changes: 94 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,95 @@
# ChangeLog

## [2024-12-17]

### `llama-index-core` [0.12.6]

- [bug fix] Ensure that StopEvent gets cleared from Context._in_progress["_done"] after a Workflow run (#17300)
- fix: add a timeout to langchain callback handler (#17296)
- tweak User vs tool in react prompts (#17273)
- refact: Refactor Document to be natively multimodal (#17204)
- fix: make ImageDocument derive from Document, backward compatible (#17259)
- fix: accept already base64-encoded data in ImageBlock (#17244); a short sketch follows this list
- fix(metrics): fixed NDCG calculation and updated previous tests (#17236)
- fix: remove llama-index-legacy dependency in llama-index-core (#17231)
- Refined the default documentation generation for function tools (#17208)
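
A minimal sketch of the ImageBlock change (#17244), assuming the llama-index-core 0.12.6 content-block API (`ImageBlock` with `image` and `image_mimetype` fields, `ChatMessage` with `blocks`) and using placeholder bytes rather than a real image file:

```python
import base64

from llama_index.core.llms import ChatMessage, ImageBlock, TextBlock

# Placeholder payload; real code would read actual image bytes from disk.
raw_bytes = b"\x89PNG\r\n\x1a\n...fake image data..."
b64_bytes = base64.b64encode(raw_bytes)

# Raw bytes are base64-encoded internally (pre-existing behaviour).
block_from_raw = ImageBlock(image=raw_bytes, image_mimetype="image/png")

# Already base64-encoded data is now accepted as-is rather than double-encoded (#17244).
block_from_b64 = ImageBlock(image=b64_bytes, image_mimetype="image/png")

# Either block can be attached to a multimodal chat message.
message = ChatMessage(
    role="user",
    blocks=[TextBlock(text="Describe this image."), block_from_raw],
)
```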

### `llama-index-embeddings-voyageai` [0.3.3]

- add support for voyageai >=0.3.0 (#17120)
- Introducing VoyageAI's new multimodal embeddings model (#17261)
- VoyageAI multimodal embedding, correction (#17284)

### `llama-index-experimental` [0.5.2]

- Fixed import errors for experimental JSONalyzeQueryEngine (#17228)

### `llama-index-graph-stores-neo4j` [0.4.4]

- Add cypher corrector and allow graph schema filtering (#17223)
- Add timeout config to neo4j graph (#17267)
- Add text and embedding type to neo4j enhanced schema (#17289)

### `llama-index-llms-anthropic` [0.6.3]

- add content blocks to anthropic (#17274)
- Do not send blank content to anthropic (#17278)
- Update anthropic type imports for v0.41.0 release (#17299)
- Fix Anthropic tokenizer protocol (fix by Devin) (#17201)

### `llama-index-llms-bedrock` [0.3.3]

- Add Amazon bedrock guardrails (#17281)

### `llama-index-llms-bedrock-converse` [0.4.2]

- Add Amazon bedrock guardrails (#17281)

### `llama-index-llms-gemini` [0.4.1]

- Gemini 2.0 support (#17249)

### `llama-index-llms-mistralai` [0.3.1]

- add tool call id/name to mistral chat messages (#17280)

### `llama-index-llms-nvidia` [0.3.1]

- Adding llama 3.3-70b as function-calling-capable (#17253)

### `llama-index-llms-openai` [0.3.10]

- fix openai message dicts for tool calls (#17254)

### `llama-index-llms-text-generation-inference` [0.3.1]

- Fix: TGI context window (#17252)

### `llama-index-multi-modal-llms-anthropic` [0.3.1]

- handle more response types in anthropic multi modal llms (#17302)

### `llama-index-readers-confluence` [0.3.1]

- Support Confluence cookies (#17276)

### `llama-index-vector-stores-milvus` [0.4.0]

- Parse "milvus_search_config" out of "vector_store_kwargs" (#17221)
- refactor and optimize milvus code (#17229)

### `llama-index-vector-stores-pinecone` [0.4.2]

- Handle empty retrieved Pinecone index values (#17242)

### `llama-index-vector-stores-qdrant` [0.4.1]

- feat: Add NOT filter condition to MetadataFilter and QdrantVectorStore (#17270)
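
A minimal sketch of the new negated filter, assuming the condition is exposed as `FilterCondition.NOT` alongside the existing `AND`/`OR` values; the filter objects come from llama-index-core and are passed to a Qdrant-backed retriever as usual:

```python
from llama_index.core.vector_stores import (
    FilterCondition,
    FilterOperator,
    MetadataFilter,
    MetadataFilters,
)

# Exclude nodes matching this group of criteria; NOT is assumed to negate the group (#17270).
filters = MetadataFilters(
    filters=[
        MetadataFilter(key="author", value="alice", operator=FilterOperator.EQ),
        MetadataFilter(key="year", value=2020, operator=FilterOperator.LT),
    ],
    condition=FilterCondition.NOT,
)

print(filters.model_dump())

# Typical usage with a Qdrant-backed index (hypothetical `index` variable):
# retriever = index.as_retriever(filters=filters)
```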

### `llama-index-vector-stores-weaviate` [1.3.0]

- Add async support to weaviate vector store integration (#17220)

## [2024-12-09]

### `llama-index-core` [0.12.5]
@@ -14,6 +104,10 @@

- feat: integration on pinecone hosted rerankers (#17192)

### `llama-index-tools-scrapegraph` [0.1.0]

- Add Scrapegraph tool integration (#17238)

### `llama-index-vector-stores-postgres` [0.3.3]

- Update pgvector dependency to version 0.3.6 (#17195)
94 changes: 94 additions & 0 deletions docs/docs/CHANGELOG.md
@@ -1,5 +1,95 @@
# ChangeLog

## [2024-12-17]

### `llama-index-core` [0.12.6]

- [bug fix] Ensure that StopEvent gets cleared from Context._in_progress["_done"] after a Workflow run (#17300)
- fix: add a timeout to langchain callback handler (#17296)
- tweak User vs tool in react prompts (#17273)
- refact: Refactor Document to be natively multimodal (#17204)
- fix: make ImageDocument derive from Document, backward compatible (#17259)
- fix: accept already base64-encoded data in ImageBlock (#17244)
- fix(metrics): fixed NDCG calculation and updated previous tests (#17236)
- fix: remove llama-index-legacy dependency in llama-index-core (#17231)
- Refined the default documentation generation for function tools (#17208)

### `llama-index-embeddings-voyageai` [0.3.3]

- add support for voyageai >=0.3.0 (#17120)
- Introducing VoyageAI's new multimodal embeddings model (#17261)
- VoyageAI multimodal embedding, correction (#17284)

### `llama-index-experimental` [0.5.2]

- Fixed import errors for experimental JSONalyzeQueryEngine (#17228)

### `llama-index-graph-stores-neo4j` [0.4.4]

- Add cypher corrector and allow graph schema filtering (#17223)
- Add timeout config to neo4j graph (#17267)
- Add text and embedding type to neo4j enhanced schema (#17289)

### `llama-index-llms-anthropic` [0.6.3]

- add content blocks to anthropic (#17274)
- Do not send blank content to anthropic (#17278)
- Update anthropic type imports for v0.41.0 release (#17299)
- Fix Anthropic tokenizer protocol (fix by Devin) (#17201)

### `llama-index-llms-bedrock` [0.3.3]

- Add Amazon bedrock guardrails (#17281)

### `llama-index-llms-bedrock-converse` [0.4.2]

- Add Amazon bedrock guardrails (#17281)

### `llama-index-llms-gemini` [0.4.1]

- Gemini 2.0 support (#17249)

### `llama-index-llms-mistralai` [0.3.1]

- add tool call id/name to mistral chat messages (#17280)

### `llama-index-llms-nvidia` [0.3.1]

- Adding llama 3.3-70b as function-calling-capable (#17253)

### `llama-index-llms-openai` [0.3.10]

- fix openai message dicts for tool calls (#17254)

### `llama-index-llms-text-generation-inference` [0.3.1]

- Fix: TGI context window (#17252)

### `llama-index-multi-modal-llms-anthropic` [0.3.1]

- handle more response types in anthropic multi modal llms (#17302)

### `llama-index-readers-confluence` [0.3.1]

- Support Confluence cookies (#17276)

### `llama-index-vector-stores-milvus` [0.4.0]

- Parse "milvus_search_config" out of "vector_store_kwargs" (#17221)
- refactor and optimize milvus code (#17229)

### `llama-index-vector-stores-pinecone` [0.4.2]

- Handle empty retrieved Pinecone index values (#17242)

### `llama-index-vector-stores-qdrant` [0.4.1]

- feat: Add NOT filter condition to MetadataFilter and QdrantVectorStore (#17270)

### `llama-index-vector-stores-weaviate` [1.3.0]

- Add async support to weaviate vector store integration (#17220)

## [2024-12-09]

### `llama-index-core` [0.12.5]
@@ -14,6 +104,10 @@

- feat: integration on pinecone hosted rerankers (#17192)

### `llama-index-tools-scrapegraph` [0.1.0]

- Add Scrapegraph tool integration (#17238)

### `llama-index-vector-stores-postgres` [0.3.3]

- Update pgvector dependency to version 0.3.6 (#17195)
4 changes: 4 additions & 0 deletions docs/docs/api_reference/tools/scrapegraph.md
@@ -0,0 +1,4 @@
::: llama_index.tools.scrapegraph
    options:
      members:
        - ScrapegraphToolSpec
8 changes: 4 additions & 4 deletions docs/mkdocs.yml
@@ -151,6 +151,7 @@ nav:
- ./examples/cookbooks/contextual_retrieval.ipynb
- ./examples/cookbooks/crewai_llamaindex.ipynb
- ./examples/cookbooks/llama3_cookbook.ipynb
- ./examples/cookbooks/llama3_cookbook_gaudi.ipynb
- ./examples/cookbooks/llama3_cookbook_groq.ipynb
- ./examples/cookbooks/llama3_cookbook_ollama_replicate.ipynb
- ./examples/cookbooks/mistralai.ipynb
@@ -256,7 +257,6 @@ nav:
- ./examples/embeddings/nomic.ipynb
- ./examples/embeddings/nvidia.ipynb
- ./examples/embeddings/oci_genai.ipynb
- ./examples/embeddings/octoai.ipynb
- ./examples/embeddings/ollama_embedding.ipynb
- ./examples/embeddings/openvino.ipynb
- ./examples/embeddings/optimum_intel.ipynb
@@ -312,6 +312,7 @@ nav:
- ./examples/ingestion/document_management_pipeline.ipynb
- ./examples/ingestion/ingestion_gdrive.ipynb
- ./examples/ingestion/parallel_execution_ingestion_pipeline.ipynb
- ./examples/ingestion/redis_ingestion_pipeline.ipynb
- LLMs:
- ./examples/llm/ai21.ipynb
- ./examples/llm/alephalpha.ipynb
@@ -384,9 +385,7 @@ nav:
- ./examples/llm/rungpt.ipynb
- ./examples/llm/sagemaker_endpoint_llm.ipynb
- ./examples/llm/sambanovasystems.ipynb
- ./examples/llm/solar.ipynb
- ./examples/llm/together.ipynb
- ./examples/llm/unify.ipynb
- ./examples/llm/upstage.ipynb
- ./examples/llm/vertex.ipynb
- ./examples/llm/vicuna.ipynb
@@ -642,7 +641,6 @@ nav:
- ./examples/vector_stores/LanternAutoRetriever.ipynb
- ./examples/vector_stores/LanternIndexDemo.ipynb
- ./examples/vector_stores/LindormDemo.ipynb
- ./examples/vector_stores/MetalIndexDemo.ipynb
- ./examples/vector_stores/MilvusHybridIndexDemo.ipynb
- ./examples/vector_stores/MilvusIndexDemo.ipynb
- ./examples/vector_stores/MilvusOperatorFunctionDemo.ipynb
@@ -1647,6 +1645,7 @@ nav:
- ./api_reference/tools/requests.md
- ./api_reference/tools/retriever.md
- ./api_reference/tools/salesforce.md
- ./api_reference/tools/scrapegraph.md
- ./api_reference/tools/shopify.md
- ./api_reference/tools/slack.md
- ./api_reference/tools/tavily_research.md
@@ -2308,6 +2307,7 @@ plugins:
- ../llama-index-integrations/llms/llama-index-llms-nebius
- ../llama-index-integrations/postprocessor/llama-index-postprocessor-bedrock-rerank
- ../llama-index-integrations/postprocessor/llama-index-postprocessor-pinecone-native-rerank
- ../llama-index-integrations/tools/llama-index-tools-scrapegraph
- redirects:
redirect_maps:
./api/llama_index.vector_stores.MongoDBAtlasVectorSearch.html: api_reference/storage/vector_store/mongodb.md
2 changes: 1 addition & 1 deletion llama-index-core/llama_index/core/__init__.py
@@ -1,6 +1,6 @@
"""Init file of LlamaIndex."""

__version__ = "0.12.5"
__version__ = "0.12.6"

import logging
from logging import NullHandler
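
After upgrading, the bump above can be verified at runtime; a quick check, assuming llama-index-core 0.12.6 is what ends up installed:

```python
import llama_index.core

# Should print "0.12.6" once this release is installed.
print(llama_index.core.__version__)
```
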
2 changes: 1 addition & 1 deletion llama-index-core/pyproject.toml
@@ -46,7 +46,7 @@ name = "llama-index-core"
packages = [{include = "llama_index"}]
readme = "README.md"
repository = "https://github.com/run-llama/llama_index"
version = "0.12.5"
version = "0.12.6"

[tool.poetry.dependencies]
SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}
@@ -27,11 +27,11 @@ exclude = ["**/BUILD"]
license = "MIT"
name = "llama-index-vector-stores-nile"
readme = "README.md"
version = "0.2.0"
version = "0.2.1"

[tool.poetry.dependencies]
python = ">=3.9,<4.0"
llama-index-core = "^0.12.0"
llama-index-core = "^0.12.6"
psycopg = "^3.2"

[tool.poetry.group.dev.dependencies]
@@ -27,14 +27,14 @@ exclude = ["**/BUILD"]
license = "MIT"
name = "llama-index-vector-stores-postgres"
readme = "README.md"
version = "0.4.0"
version = "0.4.1"

[tool.poetry.dependencies]
python = ">=3.9,<4.0"
pgvector = ">=0.3.6,<1.0.0"
psycopg2-binary = ">=2.9.9,<3.0.0"
asyncpg = ">=0.29.0,<1.0.0"
llama-index-core = "^0.12.0"
llama-index-core = "^0.12.6"

[tool.poetry.dependencies.sqlalchemy]
extras = ["asyncio"]
@@ -27,13 +27,13 @@ exclude = ["**/BUILD"]
license = "MIT"
name = "llama-index-vector-stores-qdrant"
readme = "README.md"
version = "0.4.0"
version = "0.4.1"

[tool.poetry.dependencies]
python = ">=3.9,<3.13"
qdrant-client = ">=1.7.1"
grpcio = "^1.60.0"
llama-index-core = "^0.12.0"
llama-index-core = "^0.12.6"

[tool.poetry.extras]
fastembed = ["fastembed"]