| code (string, lengths 141 to 97.3k) | apis (list, lengths 1 to 24) | extract_api (string, lengths 113 to 214k) |
|---|---|---|
from llama_index import Document
import json, os
from llama_index.node_parser import SimpleNodeParser
from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex
from langchain import OpenAI
from llama_index.composability import ComposableGraph
from llama_index.data_structs.node_v2 import Node, Docu... | [
"llama_index.data_structs.node_v2.Node",
"llama_index.GPTTreeIndex.load_from_disk",
"llama_index.composability.ComposableGraph.from_indices",
"llama_index.GPTTreeIndex",
"llama_index.PromptHelper",
"llama_index.composability.ComposableGraph.load_from_disk"
] | [((705, 764), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (717, 764), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((853, 879), 'os.listdir', 'os.listdir', (['... |
# This file has been modified by the Nextpy Team in 2023 using AI tools and automation scripts.
# We have rigorously tested these modifications to ensure reliability and performance. Based on successful test results, we are confident in the quality and stability of these changes.
"""Base reader class."""
from abc imp... | [
"llama_index.schema.Document"
] | [((877, 904), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (894, 904), False, 'import logging\n'), ((2439, 2467), 'slack_sdk.WebClient', 'WebClient', ([], {'token': 'slack_token'}), '(token=slack_token)\n', (2448, 2467), False, 'from slack_sdk import WebClient\n'), ((2508, 2545), 'slack... |
"""
This is the documentation of the Llama2-7B-chat model from Hugging Face
This model has 7 billion parameters, developed by Meta
This is used for QnA purposes on a local machine for testing...
Model hardware config:
- GPU: Nvidia RTX 40 Series (12GB) --> CUDA support
- RAM... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.get_response_synthesizer",
"llama_index.postprocessor.SimilarityPostprocessor",
"llama_index.query_engine.RetrieverQueryEngine",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.prompts.Pro... | [((1191, 1204), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1202, 1204), False, 'from dotenv import load_dotenv\n'), ((1216, 1237), 'os.getenv', 'os.getenv', (['"""HF_TOKEN"""'], {}), "('HF_TOKEN')\n", (1225, 1237), False, 'import os\n'), ((5540, 5566), 'os.system', 'os.system', (['"""rm -rf Data_*"""'], {}... |
import sys
sys.stdout.reconfigure(encoding="utf-8")
sys.stdin.reconfigure(encoding="utf-8")
import streamlit as st
import streamlit.components.v1 as components
import re
import random
CODE_BUILD_KG = """
# Prepare for GraphStore
os.environ['NEBULA_USER'] = "root"
os.environ['NEBULA_PASSWORD'] = "nebula" # defaul... | [
"llama_index.llms.AzureOpenAI",
"llama_index.LLMPredictor",
"llama_index.query_engine.KnowledgeGraphQueryEngine",
"llama_index.ServiceContext.from_defaults",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.graph_stores.NebulaGraphStore"
] | [((12, 52), 'sys.stdout.reconfigure', 'sys.stdout.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (34, 52), False, 'import sys\n'), ((53, 92), 'sys.stdin.reconfigure', 'sys.stdin.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (74, 92), False, 'import sys\n'), ((2986, 3... |
import datetime
import uuid
from llama_index.core.memory import ChatMemoryBuffer
class Chat:
def __init__(self, model):
self.model = model
if model.id is None:
self.id = str(uuid.uuid4())
else:
self.id = model.id
self.history = ChatMemoryBuffer.from_defau... | [
"llama_index.core.memory.ChatMemoryBuffer.from_defaults"
] | [((293, 341), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(3900)'}), '(token_limit=3900)\n', (323, 341), False, 'from llama_index.core.memory import ChatMemoryBuffer\n'), ((366, 389), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n'... |
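For context, a minimal sketch of the ChatMemoryBuffer API this class relies on (llama_index >= 0.10); the token limit matches the extracted call, while the messages are placeholders.

```python
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.llms import ChatMessage

# Token-bounded chat history; older turns are dropped once the limit is hit.
history = ChatMemoryBuffer.from_defaults(token_limit=3900)
history.put(ChatMessage(role="user", content="Hello!"))
history.put(ChatMessage(role="assistant", content="Hi, how can I help?"))
print(history.get())  # returns the messages that fit within 3900 tokens
```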
from pathlib import Path
from llama_index import Document, SimpleDirectoryReader, download_loader
from llama_index.query_engine import RetrieverQueryEngine
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores... | [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.embeddings.openai.OpenA... | [((531, 544), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (542, 544), False, 'from dotenv import load_dotenv\n'), ((1142, 1160), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (1158, 1160), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1366, 1472), 'pi... |
from llama_index.node_parser import SimpleNodeParser
from typing import *
from llama_index.data_structs import Node
import requests
from collections import defaultdict
from llama_index import Document
from config import config
def load_and_parse(all_docs):
documents = []
for file_row in all_docs:
url... | [
"llama_index.node_parser.SimpleNodeParser.from_defaults"
] | [((430, 443), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (441, 443), False, 'from collections import defaultdict\n'), ((1033, 1120), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'config.node_chunk_size', 'chunk_overlap': '(50)'}), '(chu... |
from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import gradio as gr
import sys
import os
import openai
openai.api_base = "https://api.app4gpt.com/v1"
os.environ["OPENAI_A... | [
"llama_index.PromptHelper",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader"
] | [((597, 696), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (609, 696), False, 'from llama_index import SimpleDirectoryReader, L... |
from llama_index.core.llms import ChatMessage
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core.prompts import PromptTemplate
from projectgurukul.custom_models import model_utils
import logging
def get_tinyllama_llm(context_window = 2048, max_new_tokens = 256, system_prompt = ""):
def m... | [
"llama_index.llms.huggingface.HuggingFaceLLM",
"llama_index.core.prompts.PromptTemplate"
] | [((720, 754), 'projectgurukul.custom_models.model_utils.get_device_and_dtype', 'model_utils.get_device_and_dtype', ([], {}), '()\n', (752, 754), False, 'from projectgurukul.custom_models import model_utils\n'), ((857, 942), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (["(f'<|system|>{system_prompt}' + ... |
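A hedged sketch of wiring HuggingFaceLLM to a chat-style query wrapper, as the row above does for TinyLlama; the checkpoint name and the exact `<|system|>`/`<|user|>` tag layout are assumptions reconstructed from the extracted template fragment.

```python
from llama_index.core.prompts import PromptTemplate
from llama_index.llms.huggingface import HuggingFaceLLM

system_prompt = "You are a helpful assistant."
# Chat format assumed from the extracted f'<|system|>{system_prompt}' fragment.
query_wrapper_prompt = PromptTemplate(
    f"<|system|>{system_prompt}</s><|user|>{{query_str}}</s><|assistant|>"
)
llm = HuggingFaceLLM(
    model_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0",  # assumed checkpoint
    tokenizer_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    context_window=2048,
    max_new_tokens=256,
    query_wrapper_prompt=query_wrapper_prompt,
)
```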
from llama_index.multi_modal_llms import GeminiMultiModal
from llama_index.program import MultiModalLLMCompletionProgram
from llama_index.output_parsers import PydanticOutputParser
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from pydantic import BaseModel, Field
from typing_extensions import Annota... | [
"llama_index.multi_modal_llms.GeminiMultiModal",
"llama_index.multi_modal_llms.openai.OpenAIMultiModal",
"llama_index.output_parsers.PydanticOutputParser"
] | [((1607, 1657), 'pydantic.Field', 'Field', (['...'], {'description': '"""Name of the damaged part"""'}), "(..., description='Name of the damaged part')\n", (1612, 1657), False, 'from pydantic import BaseModel, Field\n'), ((1676, 1726), 'pydantic.Field', 'Field', (['...'], {'description': '"""Estimated cost of repair"""... |
import streamlit as st
import pandas as pd
import os
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from llama_index import (
SimpleDirectoryReader,
VectorSt... | [
"llama_index.llms.LlamaCPP"
] | [((13454, 13514), 'pandas.read_csv', 'pd.read_csv', (['"""src/data/plant_compatibility.csv"""'], {'index_col': '(0)'}), "('src/data/plant_compatibility.csv', index_col=0)\n", (13465, 13514), True, 'import pandas as pd\n'), ((13774, 13825), 'streamlit.session_state.raw_plant_compatibility.to_numpy', 'st.session_state.ra... |
import requests
from bs4 import BeautifulSoup
from typing import Tuple, Dict, Any
from llama_index import Document
def page_ingest(url) -> Tuple[str, Dict[str, Any]]:
print("url", url)
label = ''
# Fetch the content from url
response = requests.get(url)
# Create a BeautifulSoup object and specif... | [
"llama_index.Document"
] | [((256, 273), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (268, 273), False, 'import requests\n'), ((344, 387), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (357, 387), False, 'from bs4 import BeautifulSoup\n'), ((807, 854), 'llama... |
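A minimal sketch of the scrape-then-wrap pattern this row's `page_ingest` follows; the URL and metadata key are illustrative, not the row's actual values.

```python
import requests
from bs4 import BeautifulSoup
from llama_index import Document

url = "https://example.com"  # placeholder
soup = BeautifulSoup(requests.get(url).text, "html.parser")
# Wrap the visible page text in a Document; the metadata key is an assumption.
doc = Document(text=soup.get_text(), metadata={"source_url": url})
```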
from pathlib import Path
from llama_index import GPTSimpleVectorIndex, download_loader
import sys
def load_document(file):
RDFReader = download_loader("RDFReader")
loader = RDFReader()
return loader.load_data(file=Path(file))
def query(index, prompt):
print("PROMPT:", prompt)
result = index.query(... | [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex",
"llama_index.download_loader"
] | [((140, 168), 'llama_index.download_loader', 'download_loader', (['"""RDFReader"""'], {}), "('RDFReader')\n", (155, 168), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((620, 650), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['document'], {}), '(document)\n', (640, 650), Fa... |
from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import gradio as gr
class ChatbotIndex:
def __init__(self, model_name, directory_path):
self.llm_predictor = LLMPredictor(C... | [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.load_index_from_storage"
] | [((1293, 1393), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'chatbot.query_response', 'inputs': '"""text"""', 'outputs': '"""text"""', 'title': '"""LocalGPT Chatbot"""'}), "(fn=chatbot.query_response, inputs='text', outputs='text',\n title='LocalGPT Chatbot')\n", (1305, 1393), True, 'import gradio as gr\n'), ((3... |
# Constants
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.prompts import ChatPromptTemplate
from llama_index.llms import OpenAI, ChatMessage, MessageRole
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.node_... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.ChatMessage",
"llama_index.prompts.ChatPromptTemplate",
"llama_index.indices.postprocessor.MetadataReplacementPostProcessor",
"llama_index.indices.po... | [((744, 788), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'MODEL', 'temperature': 'TEMPERATURE'}), '(model=MODEL, temperature=TEMPERATURE)\n', (750, 788), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((820, 921), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defa... |
import os
import uvicorn
import asyncio
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor, StorageContext
from fastapi.middleware.cors import CORSMiddleware
from langcha... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.LLMPredictor"
] | [((360, 429), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', temperature=0, streaming=True)\n", (370, 429), False, 'from langchain.chat_models import ChatOpenAI\n'), ((446, 467), 'llama_index.LLMPr... |
"""Handles chat interactions for WandBot.
This module contains the Chat class which is responsible for handling chat interactions.
It includes methods for initializing the chat, loading the storage context from an artifact,
loading the chat engine, validating and formatting questions, formatting responses, and getti... | [
"llama_index.callbacks.WandbCallbackHandler",
"llama_index.callbacks.TokenCountingHandler",
"llama_index.llms.generic_utils.messages_to_history_str",
"llama_index.llms.ChatMessage",
"llama_index.callbacks.CallbackManager",
"llama_index.indices.postprocessor.CohereRerank",
"llama_index.schema.QueryBundle... | [((2223, 2243), 'wandbot.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (2233, 2243), False, 'from wandbot.utils import Timer, get_logger, load_service_context\n'), ((2368, 2415), 'llama_index.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['message_templates[:-1]'], {}),... |
import streamlit as st
from llama_index import VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore
import chromadb
st.title('Precident')
# load and prime the index
db2 = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db2.get_or_create_collection("quickstart")
vector_store = Chr... | [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.ChromaVectorStore"
] | [((137, 158), 'streamlit.title', 'st.title', (['"""Precident"""'], {}), "('Precident')\n", (145, 158), True, 'import streamlit as st\n'), ((193, 238), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (218, 238), False, 'import chromadb\n'), ((317, ... |
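A minimal sketch of the reopen-a-persisted-Chroma-collection flow this row performs, matching the extracted calls; only the final query line is an addition.

```python
import chromadb
from llama_index import VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore

db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
# Rebuild the index view over the already-embedded collection.
index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = index.as_query_engine()
```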
import os
import time
from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context
import llama_index
from Models import Models
from DocumentClass import DocumentClass
class MediawikiLLM:
service_context = None
mediawiki_url = None
api_url ... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.set_global_service_context",
"llama_index.indices.empty.EmptyIndex"
] | [((542, 564), 'DocumentClass.DocumentClass', 'DocumentClass', (['api_url'], {}), '(api_url)\n', (555, 564), False, 'from DocumentClass import DocumentClass\n'), ((795, 870), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""', 'chunk_size': '(1024... |
import os
import shutil
import chromadb
import redis
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.storage import StorageContext
from app.tools import FindEmbeddingsPath
from llama_index.vector_stores.redis import RedisVectorStore
from llama_index.vector_stores.chroma import ChromaVectorS... | [
"llama_index.core.storage.StorageContext.from_defaults",
"llama_index.vector_stores.chroma.ChromaVectorStore",
"llama_index.vector_stores.redis.RedisVectorStore"
] | [((370, 408), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (388, 408), False, 'from app.tools import FindEmbeddingsPath\n'), ((469, 505), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (494, 505), False,... |
#!/usr/bin/env python3
import json
import logging
import re
import requests
import altair as alt
import matplotlib.pyplot as plt
import pandas as pd
import streamlit as st
from datetime import datetime, timedelta
from langchain.llms import OpenAI
from llama_index import GPTVectorStoreIndex, Document, LLMPredictor, S... | [
"llama_index.ServiceContext.from_defaults"
] | [((419, 451), 'logging.getLogger', 'logging.getLogger', (['"""llama_index"""'], {}), "('llama_index')\n", (436, 451), False, 'import logging\n'), ((992, 1102), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'TITLE', 'page_icon': 'ICON', 'layout': '"""centered"""', 'initial_sidebar_state': '"""co... |
# https://gpt-index.readthedocs.io/en/latest/examples/query_engine/sub_question_query_engine.html
# Using LlamaIndex as a Callable Tool
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import i... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.tools.ToolMetadata",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader"
] | [((874, 992), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'truncation': 'only_first', 'max_length': 1024}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'truncation': 'only_first', 'max_length': 1024})\n", (888, 992), False, 'from langch... |
from llama_index.core.node_parser import HTMLNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/others/sample.html"))
my_tags = ["p", "span"]
html_parser = HTMLNodeParser(tags=my_tags)
nodes = html_parser.get_nodes_from_d... | [
"llama_index.readers.file.FlatReader",
"llama_index.core.node_parser.HTMLNodeParser"
] | [((139, 151), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (149, 151), False, 'from llama_index.readers.file import FlatReader\n'), ((255, 283), 'llama_index.core.node_parser.HTMLNodeParser', 'HTMLNodeParser', ([], {'tags': 'my_tags'}), '(tags=my_tags)\n', (269, 283), False, 'from llama_index.... |
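A hedged completion of the call truncated above: `get_nodes_from_documents` takes the list returned by `FlatReader.load_data`.

```python
from pathlib import Path
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser

documents = FlatReader().load_data(Path("files/others/sample.html"))
html_parser = HTMLNodeParser(tags=["p", "span"])  # only these tags become nodes
nodes = html_parser.get_nodes_from_documents(documents)
```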
from django.shortcuts import render
from django.views import generic
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
import os
fr... | [
"llama_index.load_index_from_storage"
] | [((817, 835), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (825, 835), False, 'from rest_framework.decorators import api_view\n'), ((545, 585), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (568, 585), False, ... |
from pathlib import Path
from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex
from llama_index.vector_stores import MilvusVectorStore
from llama_index.readers import PDFReader
from llama_index import StorageContext
from pymilvus import MilvusClient
import os
# Define constants for Mi... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.readers.PDFReader",
"llama_index.vector_stores.MilvusVectorStore"
] | [((353, 399), 'os.environ.get', 'os.environ.get', (['"""MILVUS_HOST"""', '"""10.97.151.193"""'], {}), "('MILVUS_HOST', '10.97.151.193')\n", (367, 399), False, 'import os\n'), ((414, 452), 'os.environ.get', 'os.environ.get', (['"""MILVUS_PORT"""', '"""19530"""'], {}), "('MILVUS_PORT', '19530')\n", (428, 452), False, 'im... |
from llama_index.llms.ollama import Ollama
from typing import Any, Sequence
from llama_index.core.bridge.pydantic import Field
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
)
from llama_index.core.llms.callbacks import llm_chat... | [
"llama_index.core.llms.callbacks.llm_completion_callback",
"llama_index.core.llms.callbacks.llm_chat_callback",
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.bridge.pydantic.Field"
] | [((396, 473), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '""""""', 'description': '"""Default system message to send to the model."""'}), "(default='', description='Default system message to send to the model.')\n", (401, 473), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((5... |
#!/usr/bin/env python3
from dataclasses import dataclass, field
from typing import cast
from loguru import logger
from llama_index.core import Document, VectorStoreIndex, Settings
from llama_index.core.query_engine import CitationQueryEngine
import nest_asyncio
from uglychain import Model, Retriever, StorageRetriever... | [
"llama_index.core.query_engine.CitationQueryEngine.from_args",
"llama_index.core.Document"
] | [((454, 512), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (473, 512), False, 'import logging\n'), ((587, 607), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (605, 607), False, 'import nest_asyncio\n')... |
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.extractors.entity import EntityExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_fr... | [
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.SimpleDirectoryReader",
"llama_index.extractors.entity.EntityExtractor"
] | [((177, 207), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (198, 207), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((248, 292), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), ... |
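One way to chain the splitter and entity extractor shown above is an ingestion pipeline; a hedged sketch follows, with the prediction threshold as an assumption.

```python
from llama_index.core import SimpleDirectoryReader
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.extractors.entity import EntityExtractor

documents = SimpleDirectoryReader("files").load_data()
# Split first, then tag each node with named entities in its metadata.
pipeline = IngestionPipeline(
    transformations=[
        SentenceSplitter(include_prev_next_rel=True),
        EntityExtractor(prediction_threshold=0.5),  # assumed threshold
    ]
)
nodes = pipeline.run(documents=documents)
```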
from llama_index.llms.llama_cpp import LlamaCPP
from llama_index.llms.llama_cpp.llama_utils import (
messages_to_prompt,
completion_to_prompt,
)
from llama_index.llms.openai import OpenAI
from core.manager import settings
MODEL = "openai"
# LLM selection
if MODEL == "openai":
print("USE OPENAI")
# Use... | [
"llama_index.llms.llama_cpp.LlamaCPP",
"llama_index.llms.openai.OpenAI"
] | [((470, 567), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-turbo-preview"""', 'api_key': 'settings.OPENAI_KEY', 'system_prompt': 'system_prompt'}), "(model='gpt-4-turbo-preview', api_key=settings.OPENAI_KEY,\n system_prompt=system_prompt)\n", (476, 567), False, 'from llama_index.llms.openai i... |
from llama_index import SimpleDirectoryReader,VectorStoreIndex , load_index_from_storage
from llama_index.storage.storage_context import StorageContext
from dotenv import load_dotenv
import logging
import sys
load_dotenv()
# enable INFO level logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
loggin... | [
"llama_index.SimpleDirectoryReader",
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.storage.storage_context.StorageContext.from_defaults"
] | [((212, 225), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (223, 225), False, 'from dotenv import load_dotenv\n'), ((255, 313), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (274, 313), False, 'import logging... |
from typing import List
from fastapi import APIRouter, Depends, HTTPException, status
from llama_index.chat_engine.types import BaseChatEngine
from llama_index.llms.base import ChatMessage
from llama_index.llms.types import MessageRole
from pydantic import BaseModel
from app.engine.index import get_chat_engine
chat_r... | [
"llama_index.llms.base.ChatMessage"
] | [((332, 343), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (341, 343), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((647, 671), 'fastapi.Depends', 'Depends', (['get_chat_engine'], {}), '(get_chat_engine)\n', (654, 671), False, 'from fastapi import APIRouter, Depends, HTTPException,... |
import os
from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
import openai
import logging
import sys
#llamaindex logs
logging.basicConfig(stream=sys.stdout, level=logg... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.LLMPredictor",
"llama_index.SimpleDirectoryReader"
] | [((271, 329), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (290, 329), False, 'import logging\n'), ((630, 657), 'os.getenv', 'os.getenv', (['"""AZURE_API_BASE"""'], {}), "('AZURE_API_BASE')\n", (639, 657), False, 'imp... |
import time
import os
import streamlit as st
import openai
import logging
import sys
import llama_index
from qdrant_client import QdrantClient
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.llms import OpenAI
from llama_index import SimpleDirectoryReader
from llama_index.storage.storage_conte... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.embeddings.VoyageEmbedding",
"llama_index.llms.OpenAI",
"llama_index.set_global_servi... | [((585, 723), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'f"""Courier v{version}"""', 'page_icon': '"""🌎"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=f'Courier v{version}', page_icon='🌎', layout=\n 'centered', initial_sideb... |
#! coding: utf-8
import os
from dataclasses import dataclass
from typing import List, Dict, Optional
from llama_index import ServiceContext, get_response_synthesizer, VectorStoreIndex, StorageContext, \
load_indices_from_storage, TreeIndex
from llama_index.indices.base import BaseIndex
from llama_index.indices.pos... | [
"llama_index.load_indices_from_storage"
] | [((899, 995), 'llama_index.load_indices_from_storage', 'load_indices_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (924, 995), False, 'from llama_index import ServiceContext, get_response_s... |
import logging
import glob
from pathlib import Path
from llama_index import (
SimpleDirectoryReader,
download_loader
)
class DataReader:
__LOGGER_NAME = "data_reader"
__SIMPLE_SUPPORTED_EXTENSIONS = [".csv", ".docx", ".epub", ".hwp", ".ipynb", ".jpeg", ".mbox", ".md", ".mp3", ".pdf", ".png", ".pptm",... | [
"llama_index.download_loader",
"llama_index.SimpleDirectoryReader"
] | [((498, 535), 'logging.getLogger', 'logging.getLogger', (['self.__LOGGER_NAME'], {}), '(self.__LOGGER_NAME)\n', (515, 535), False, 'import logging\n'), ((2288, 2330), 'llama_index.download_loader', 'download_loader', (['self.__JSON_READER_LOADER'], {}), '(self.__JSON_READER_LOADER)\n', (2303, 2330), False, 'from llama_... |
from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext
from llama_index.llms import LlamaCPP
llm = LlamaCPP(model_path="./models/llama-2-13b-chat.Q4_0.gguf")
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(llm_pred... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.LlamaCPP",
"llama_index.set_global_service_context"
] | [((167, 225), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': '"""./models/llama-2-13b-chat.Q4_0.gguf"""'}), "(model_path='./models/llama-2-13b-chat.Q4_0.gguf')\n", (175, 225), False, 'from llama_index.llms import LlamaCPP\n'), ((242, 263), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), ... |
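A hedged sketch completing the truncated setup above (legacy pre-0.10 API); the gguf path comes from the row, while the local embedding model and data folder are assumptions.

```python
from llama_index import (
    LLMPredictor,
    ServiceContext,
    SimpleDirectoryReader,
    VectorStoreIndex,
    set_global_service_context,
)
from llama_index.llms import LlamaCPP

llm = LlamaCPP(model_path="./models/llama-2-13b-chat.Q4_0.gguf")
service_context = ServiceContext.from_defaults(
    llm_predictor=LLMPredictor(llm=llm),
    embed_model="local",  # assumed: local HuggingFace embeddings, no OpenAI key
)
set_global_service_context(service_context)  # every new index uses this context
documents = SimpleDirectoryReader("data").load_data()  # hypothetical folder
index = VectorStoreIndex.from_documents(documents)
```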
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import openai
import yaml
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import SpacyTextSplitter
from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader
from llama_index.node_parser import SimpleNodeParser
'''
... | [
"llama_index.node_parser.SimpleNodeParser",
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTListIndex",
"llama_index.SimpleDirectoryReader"
] | [((803, 860), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (831, 860), False, 'from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader\n'), ((906, 967), 'langchain.text_splitter.... |
from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.multi_modal import Mu... | [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.callbacks.base.CallbackManager"
] | [((1114, 1604), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only c... |
import asyncio
import os
import tempfile
import traceback
from datetime import datetime, date
from functools import partial
from pathlib import Path
import discord
import aiohttp
import openai
import tiktoken
from langchain.chat_models import ChatOpenAI
from llama_index import (
QuestionAnswerPrompt,
GPTVecto... | [
"llama_index.get_response_synthesizer",
"llama_index.indices.query.query_transform.StepDecomposeQueryTransform",
"llama_index.OpenAIEmbedding",
"llama_index.composability.QASummaryQueryEngineBuilder",
"llama_index.MockEmbedding",
"llama_index.QuestionAnswerPrompt",
"llama_index.ServiceContext.from_defau... | [((1135, 1168), 'services.environment_service.EnvService.get_max_search_price', 'EnvService.get_max_search_price', ([], {}), '()\n', (1166, 1168), False, 'from services.environment_service import EnvService\n'), ((1346, 1384), 'services.environment_service.EnvService.get_google_search_api_key', 'EnvService.get_google_s... |
from typing import List
from llama_index import Document, StorageContext, VectorStoreIndex, load_index_from_storage
import os
def create_vector(service_context, vector_storage_dir: str, doc_loader: callable) -> List[Document]:
if not os.path.exists(vector_storage_dir):
documents = doc_loader()
pri... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((240, 274), 'os.path.exists', 'os.path.exists', (['vector_storage_dir'], {}), '(vector_storage_dir)\n', (254, 274), False, 'import os\n'), ((416, 491), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_contex... |
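A minimal sketch of the create-or-load pattern the `create_vector` function above implements: build and persist on the first run, reload from disk afterwards.

```python
import os
from llama_index import StorageContext, VectorStoreIndex, load_index_from_storage

def get_index(service_context, vector_storage_dir, doc_loader):
    if not os.path.exists(vector_storage_dir):
        # First run: build from documents and persist to disk.
        index = VectorStoreIndex.from_documents(
            doc_loader(), service_context=service_context
        )
        index.storage_context.persist(persist_dir=vector_storage_dir)
    else:
        # Later runs: reload the persisted index instead of re-embedding.
        storage_context = StorageContext.from_defaults(persist_dir=vector_storage_dir)
        index = load_index_from_storage(storage_context, service_context=service_context)
    return index
```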
import streamlit as st
import torch
from glob import glob
from pathlib import Path
from llama_index.prompts.prompts import SimpleInputPrompt
from llama_index import (
set_global_service_context,
ServiceContext,
VectorStoreIndex,
download_loader,
)
from langchain.embeddings import HuggingFaceEmbeddings
f... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.download_loader",
"llama_index.llms.HuggingFaceLLM",
"llama_index.prompts.prompts.SimpleInputPrompt",
"llama_index.set_global_service_context"
] | [((495, 527), 'llama_index.download_loader', 'download_loader', (['"""PyMuPDFReader"""'], {}), "('PyMuPDFReader')\n", (510, 527), False, 'from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, download_loader\n'), ((2176, 2216), 'llama_index.prompts.prompts.SimpleInputPrompt', 'SimpleInpu... |
from langchain.agents import load_tools, Tool, tool
from langchain.agents import initialize_agent
from langchain.llms import OpenAI, OpenAIChat
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS
from langchain.text_splitter import CharacterTextSpl... | [
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.Document"
] | [((721, 754), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (744, 754), False, 'import pinecone, warnings, yaml, os\n'), ((978, 990), 'gptcache.cache.init', 'cache.init', ([], {}), '()\n', (988, 990), False, 'from gptcache import cache\n'), ((999, 1021), 'gptcache.cache.s... |
import os, config, openai
from llama_index import StorageContext, load_index_from_storage
openai.api_key = config.OPENAI_API_KEY
os.environ['OPENAI_API_KEY'] = config.OPENAI_API_KEY
# new version of llama index uses StorageContext instead of load_from_disk
# index = GPTSimpleVectorIndex.load_from_disk('index_news.jso... | [
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((342, 395), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (370, 395), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((404, 444), 'llama_index.load_index_from_storage', 'load_index_from... |
import os
from dotenv import load_dotenv, find_dotenv
import numpy as np
import nest_asyncio
nest_asyncio.apply()
def get_openai_api_key():
_ = load_dotenv(find_dotenv())
return os.getenv("OPENAI_API_KEY")
from trulens_eval import (
Feedback,
TruLlama,
OpenAI
)
from trulens_eval.feedback imp... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.AutoMergingRetriever",
"llama_index.node_parser.get_leaf_nodes",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.V... | [((96, 116), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (114, 116), False, 'import nest_asyncio\n'), ((192, 219), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (201, 219), False, 'import os\n'), ((408, 416), 'trulens_eval.OpenAI', 'OpenAI', ([], {}), '()\n', (414, ... |
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.llms import Perplexity
from llama_index import SimpleDirectoryReader
@st.cache_resource(show_spinner=True)
def load_data():
with st.spinner(text="Die LSB-Informationen werden indiziert. Das dauert nur ein paar Augenbli... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.llms.Perplexity",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader"
] | [((168, 204), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(True)'}), '(show_spinner=True)\n', (185, 204), True, 'import streamlit as st\n'), ((657, 1257), 'llama_index.llms.Perplexity', 'Perplexity', ([], {'api_key': 'pplx_api_key', 'model': '"""pplx-70b-chat"""', 'temperature': '(0.4)', 'sy... |
import logging
import sys
from dotenv import load_dotenv
from llama_index.core import VectorStoreIndex
from llama_index.readers.web import SimpleWebPageReader
def setup_logging():
"""
Initialize logging configuration to output logs to stdout.
"""
logging.basicConfig(stream=sys.stdout, level=logging.INF... | [
"llama_index.readers.web.SimpleWebPageReader",
"llama_index.core.VectorStoreIndex.from_documents"
] | [((264, 322), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (283, 322), False, 'import logging\n'), ((506, 519), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (517, 519), False, 'from dotenv import load_dotenv... |
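A minimal sketch of the web-page ingestion flow this row sets up; the URL and query string are placeholders.

```python
from llama_index.core import VectorStoreIndex
from llama_index.readers.web import SimpleWebPageReader

# html_to_text=True strips markup before indexing; the URL is a placeholder.
documents = SimpleWebPageReader(html_to_text=True).load_data(["https://example.com"])
index = VectorStoreIndex.from_documents(documents)
print(index.as_query_engine().query("What is this page about?"))
```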
# %% [markdown]
# # Llama-Index Quickstart
#
# In this quickstart you will create a simple Llama Index App and learn how to log it and get feedback on an LLM response.
#
# For evaluation, we will leverage the "hallucination triad" of groundedness, context relevance and answer relevance.
#
# [, 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '"""fovi-site"""', 'location': '"""us-west1"""'}), "(project='fovi-site', location='us-west1')\n", (1451, 1493), False, 'from google.cloud import aiplatform\n'), ((1501, 1531), 'trulens_eval.Tru', 'Tru', ([], {'database_redact_keys': '(... |
import os
import shutil
import tarfile
import tempfile
import time
from pathlib import Path
import arxiv
import openai
import pandas as pd
import pdfplumber
import streamlit as st
from llama_index import (KeywordTableIndex, KnowledgeGraphIndex,
ServiceContext, SimpleDirectoryReader, SummaryInd... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.schema.Document",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.set_global_service_context",
"llama_index.llms.Xinference"
] | [((1035, 1158), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with arXiv paper without PDF noise, powered by LaTeX Rainbow."""', 'layout': '"""wide"""'}), "(page_title=\n 'Chat with arXiv paper without PDF noise, powered by LaTeX Rainbow.',\n layout='wide')\n", (1053, 1158), True... |
"""Global eval handlers."""
from typing import Any
from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler
from llama_index.legacy.callbacks.arize_phoenix_callback import (
arize_phoenix_callback_handler,
)
from llama_index.legacy.callbacks.base_handler import BaseCallbackHandler
from l... | [
"llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler",
"llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler",
"llama_in... | [((1239, 1274), 'llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1259, 1274), False, 'from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1332, 1375), 'llama_index.legacy.callbacks.open_inference_callback.OpenInfe... |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# ht... | [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.LLMPredictor",
"llama_index.SimpleDirectoryReader"
] | [((1797, 1849), 'nemoguardrails.RailsConfig.from_content', 'RailsConfig.from_content', (['COLANG_CONFIG', 'YAML_CONFIG'], {}), '(COLANG_CONFIG, YAML_CONFIG)\n', (1821, 1849), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((1860, 1876), 'nemoguardrails.LLMRails', 'LLMRails', (['config'], {}), '(config)\n... |
import sys
from langchain import OpenAI
from pathlib import Path
import llama_index as li
#from llamahub.connectors import TextFileConnector
from llama_index import SimpleDirectoryReader,GPTListIndex,LLMPredictor
file_name = sys.argv[1]
llm_predictor = LLMPredictor(llm=OpenAI(model_name="gpt-3.5-turbo")) #temperature=... | [
"llama_index.GPTListIndex",
"llama_index.SimpleDirectoryReader"
] | [((391, 409), 'llama_index.GPTListIndex', 'GPTListIndex', (['docs'], {}), '(docs)\n', (403, 409), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, LLMPredictor\n'), ((271, 305), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""'}), "(model_name='gpt-3.5-turbo')\n", (277, 305), F... |
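A hedged sketch of the list-index summarization flow suggested above, using the somewhat later legacy `from_documents` form rather than the row's direct `GPTListIndex(docs)` constructor; the input file is a placeholder.

```python
from langchain import OpenAI
from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader

docs = SimpleDirectoryReader(input_files=["notes.txt"]).load_data()  # placeholder file
llm_predictor = LLMPredictor(llm=OpenAI(model_name="gpt-3.5-turbo"))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
index = GPTListIndex.from_documents(docs, service_context=service_context)
# A list index visits every node, so tree_summarize yields a whole-document summary.
summary = index.as_query_engine(response_mode="tree_summarize").query("Summarize the document.")
```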
# Copyright © 2024 Pathway
"""
Pathway vector search server and client.
The server reads source documents and builds a vector index over them, then starts serving
HTTP requests.
The client queries the server and returns matching documents.
"""
import asyncio
import functools
import json
import logging
import threadi... | [
"llama_index.core.schema.TextNode",
"llama_index.core.ingestion.pipeline.run_transformations"
] | [((1515, 1548), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (1542, 1548), False, 'import asyncio\n'), ((1111, 1138), 'asyncio.run', 'asyncio.run', (['self.coroutine'], {}), '(self.coroutine)\n', (1122, 1138), False, 'import asyncio\n'), ((1192, 1218), 'asyncio.get_running_l... |
# Imports
from collections import defaultdict
from time import sleep
from llama_index import (
StorageContext,
load_index_from_storage,
set_global_service_context,
)
from model_context import get_anyscale_context
from templates import custom_template, yn_template
import csv
from tqdm import tqdm
from openai... | [
"llama_index.set_global_service_context",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((345, 416), 'openai.OpenAI', 'OpenAI', ([], {'base_url': '"""https://api.endpoints.anyscale.com/v1"""', 'api_key': '"""KEY"""'}), "(base_url='https://api.endpoints.anyscale.com/v1', api_key='KEY')\n", (351, 416), False, 'from openai import OpenAI\n'), ((2366, 2383), 'collections.defaultdict', 'defaultdict', (['list']... |
import os
import glob
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.anthropic import Anthropic
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize
# MODEL = "claude-3-opus-20240229"
# MODEL = "claude-3-sonnet-2024022... | [
"llama_index.llms.anthropic.Anthropic",
"llama_index.core.response_synthesizers.TreeSummarize",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.ServiceContext.from_defaults"
] | [((470, 509), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (481, 509), False, 'import os\n'), ((1440, 1479), 'llama_index.llms.anthropic.Anthropic', 'Anthropic', ([], {'model': 'MODEL', 'max_tokens': '(1024)'}), '(model=MODEL, max_tokens=1024)\n', (1449, 147... |
import streamlit as st
import os
import openai
import llama_index
from llama_index.llms import OpenAI
from llama_index.indices.composability import ComposableGraph
from llama_index.storage import StorageContext
from llama_index import TreeIndex, SummaryIndex
from llama_index.indices.loading import load_graph_from_stor... | [
"llama_index.storage.StorageContext.from_defaults",
"llama_index.indices.loading.load_graph_from_storage"
] | [((520, 709), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with AAPL 23 10-Qs, powered by Munger"""', 'page_icon': '""":chart_with_upwards_trend:"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title='Chat with AAPL 23 10-Qs, powe... |
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" h... | [
"llama_index.core.callbacks.LlamaDebugHandler",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.set_global_handler",
"llama_index.callbacks.wandb.WandbCallbackHandler",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.... | [((926, 962), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0)'}), "(model='gpt-4', temperature=0)\n", (932, 962), False, 'from llama_index.llms.openai import OpenAI\n'), ((1040, 1103), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""wandb"""'], {'run_args... |
import streamlit as st
import llama_index
from llama_index import StorageContext, load_index_from_storage
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import SimpleVectorStore
from llama_index.storage.index_store im... | [
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.ServiceContext.from_defaults",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.Prompt",
"llama_index.load_index_from_storage",
"llama_index.vector_stores.SimpleVectorStore.from_persist_dir"
... | [((1225, 1252), 'streamlit.title', 'st.title', (['"""Llama Index App"""'], {}), "('Llama Index App')\n", (1233, 1252), True, 'import streamlit as st\n'), ((1355, 1400), 'streamlit.multiselect', 'st.multiselect', (['"""Select indexes"""', 'index_names'], {}), "('Select indexes', index_names)\n", (1369, 1400), True, 'imp... |
from fastapi import FastAPI
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
import os.path
import llama_index
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
ServiceContext,
load_index_from_storage,
set_global_serv... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.set_global_handler",
"llama_index.SimpleDirectoryReader",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.load_index_from_storage",
"llama_ind... | [((528, 568), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (558, 568), False, 'import llama_index\n'), ((601, 610), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (608, 610), False, 'from fastapi import FastAPI\n'), ((722, 795), 'llama_index.embeddings.Hug... |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')... | [
"llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine",
"llama_index.core.PromptTemplate",
"llama_index.packs.tables.chain_of_table.base.serialize_table",
"llama_index.core.query_pipeline.QueryPipeline",
"llama_index.core.llama_pack.download_llama_pack",
"llama_index.llms.openai.OpenAI"
] | [((389, 442), 'pandas.read_csv', 'pd.read_csv', (['"""./WikiTableQuestions/csv/200-csv/3.csv"""'], {}), "('./WikiTableQuestions/csv/200-csv/3.csv')\n", (400, 442), True, 'import pandas as pd\n'), ((622, 707), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""ChainOfTablePack"""', '"""./cha... |
import gradio as gr
import os
from datetime import datetime
import logging
import sys
from llama_index import SimpleDirectoryReader
import llama_index.readers.file.base
import glob
import numpy as np
import soundfile as sf
import shutil
import openai
import json
import cv2
from llama_index import download_loader
Imag... | [
"llama_index.download_loader"
] | [((337, 374), 'llama_index.download_loader', 'download_loader', (['"""ImageCaptionReader"""'], {}), "('ImageCaptionReader')\n", (352, 374), False, 'from llama_index import download_loader\n'), ((423, 481), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sy... |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.... | [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7432, 7500), 'llama_index.download.utils.ini... |
import os
import openai
import logging
import sys
import llama_index
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
LLMPredictor,
PromptHelper,
ServiceContext,
)
from llama_index.llms import OpenAI
import chromadb
from llama_index.... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.query_engine.CitationQueryEngine.from_args",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI",
... | [((879, 884), 'trulens_eval.Tru', 'Tru', ([], {}), '()\n', (882, 884), False, 'from trulens_eval import Tru\n'), ((907, 965), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (926, 965), False, 'import logging\n'), ((997,... |
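A hedged sketch of the citation flow implied by `CitationQueryEngine.from_args` above (legacy API); the data folder, chunk size, and query are illustrative.

```python
from llama_index import SimpleDirectoryReader, VectorStoreIndex
from llama_index.query_engine import CitationQueryEngine

index = VectorStoreIndex.from_documents(SimpleDirectoryReader("data").load_data())
# Answers cite numbered source chunks of (assumed) 512 tokens each.
query_engine = CitationQueryEngine.from_args(index, citation_chunk_size=512)
response = query_engine.query("What does the corpus say about indexing?")
for source in response.source_nodes:
    print(source.node.get_text()[:100])  # preview each cited chunk
```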
import streamlit as st
import llama_index
from llama_index import StorageContext, load_index_from_storage
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import SimpleVectorStore
from llama_index.storage.index_store im... | [
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.ResponseSynthesizer.from_args",
"llama_index.ServiceContext.from_defaults",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.Prompt",
"llama_index.load_index_from_storage",
"llama_index.vec... | [((2439, 2481), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context_1'], {}), '(storage_context_1)\n', (2462, 2481), False, 'from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage\n'), ((2493, 2535), 'llama_index.load_index_from_storage', 'lo... |
# https://www.youtube.com/watch?v=oDzWsynpOyI
import logging
import sys
import os
from dotenv import load_dotenv
load_dotenv()
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
ServiceContext,
Document,
)
import json
import llama_in... | [
"llama_index.llms.AzureOpenAI",
"llama_index.postprocessor.MetadataReplacementPostProcessor",
"llama_index.embeddings.AzureOpenAIEmbedding",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.StorageContext.from_defaults",
"llama_index.VectorStoreInde... | [((116, 129), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (127, 129), False, 'from dotenv import load_dotenv\n'), ((963, 996), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_API_KEY"""'], {}), "('AZURE_OPENAI_API_KEY')\n", (972, 996), False, 'import os\n'), ((1014, 1048), 'os.getenv', 'os.getenv', (['"""AZURE_... |
from importlib import metadata
from pathlib import WindowsPath
from re import sub
from llama_index import (
ServiceContext,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
global_service_context,
)
import llama_index
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index... | [
"llama_index.postprocessor.MetadataReplacementPostProcessor",
"llama_index.postprocessor.SimilarityPostprocessor",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.StorageContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.readers.SimpleDirectoryReader",
"llama_index.VectorStore... | [((1300, 1460), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""', 'include_metadata': '(True)'}), "(window_size=3, window_metadata_key=\n '... |
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.... | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.ServiceContext.from_defaults",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.VectorStoreIndex",
"llama_index.core.tools.FunctionTool.from_defaults",
"llama_index.core.lla... | [((266, 281), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (279, 281), True, 'import phoenix as px\n'), ((385, 405), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (403, 405), False, 'import nest_asyncio\n'), ((549, 624), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack... |
import os
import hashlib
from threading import Thread
from pathlib import Path
#import llama_index
from openai import OpenAI
import constants as c
from llama_index import StorageContext, VectorStoreIndex, Document
from llama_index.node_parser import SimpleNodeParser
from llama_index import SimpleDirectoryReader
c.Get_... | [
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.load_index_from_storage",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.PromptH... | [((314, 325), 'constants.Get_API', 'c.Get_API', ([], {}), '()\n', (323, 325), True, 'import constants as c\n'), ((335, 343), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (341, 343), False, 'from openai import OpenAI\n'), ((1027, 1045), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {}), '()\n', (1043, 1045), ... |
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.core.embeddings.base import BaseEmbedding
from llama_index.indices.prompt_helper imp... | [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.P... | [((962, 989), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (979, 989), False, 'import logging\n'), ((1764, 1821), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n'... |
import os
import hashlib
from threading import Thread
from pathlib import Path
#import llama_index
from openai import OpenAI
import constants as c
c.Get_API()
client = OpenAI()
newdocspath = ""
masterpath = ""
basepath = ""
persistpath = ""
# test
class Document:
__slots__ = ['text', 'doc_id', 'id_', 'hash']
... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.StorageContext",
"llama_index.StorageContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.load_index_from_storage",
"llama_index.Document"
] | [((147, 158), 'constants.Get_API', 'c.Get_API', ([], {}), '()\n', (156, 158), True, 'import constants as c\n'), ((168, 176), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (174, 176), False, 'from openai import OpenAI\n'), ((854, 872), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {}), '()\n', (870, 872), Fals... |
import dataclasses
import logging
from dataclasses import dataclass
from typing import Optional
from llama_index.bridge.langchain import BaseLanguageModel
import llama_index
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai ... | [
"llama_index.langchain_helpers.chain_wrapper.LLMPredictor",
"llama_index.callbacks.base.CallbackManager",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults",
"llama_index.logger.LlamaLogger",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.openai.Op... | [((714, 741), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (731, 741), False, 'import logging\n'), ((972, 1094), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callba... |
import llama_index.core
llama_index.core.set_global_handler("simple")
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.core import VectorStoreIndex
#Loading
documents = SimpleDirectoryReader("dataset/txt").load_data()
print(document... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader"
] | [((445, 487), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (476, 487), False, 'from llama_index.core import VectorStoreIndex\n'), ((257, 293), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""dataset/txt"""'], {}), "... |
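
The row above stops after building the index; querying it takes one more call. A sketch that repeats the row's two steps and adds the query, assuming the same dataset/txt directory (the question string is illustrative):

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

documents = SimpleDirectoryReader("dataset/txt").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
print(query_engine.query("What do these documents cover?"))  # illustrative question
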
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set... | [
"llama_index.postprocessor.cohere_rerank.CohereRerank",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.output_parsers.PydanticOutputParser",
"llama_index.core.PromptTemplate",
"llama_index.core.response_synthesizers.TreeSummarize",
"llama_index.core.query_pipeline.QueryPipeline",
"ll... | [((259, 274), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (272, 274), True, 'import phoenix as px\n'), ((510, 539), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (516, 539), False, 'from llama_index.llms.openai import OpenAI\n'), ((563, 610... |
import logging
from dataclasses import dataclass
from typing import Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolve_embe... | [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.llms.loading.load_llm",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.node_parser.extractors.loading.load_extractor",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loadin... | [((965, 992), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (982, 992), False, 'import logging\n'), ((1223, 1345), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callb... |
from urllib import response
import llama_index
from pathlib import Path
from typing import Annotated, List
from fastapi.responses import StreamingResponse
from fastapi import (
File,
Form,
UploadFile,
APIRouter,
Depends,
HTTPException,
Request,
status
)
from llama_index import StorageCo... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.SummaryIndexEmbeddingRetriever",
"llama_index.llms.types.ChatMessage",
"llama_index.vector_stores.MetadataFilter",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.selectors.llm_selectors.LLMSingleSelector.from_defaults",
"lla... | [((1534, 1545), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (1543, 1545), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((3266, 3308), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_tra... |
import utils
import os
import requests
import llama_index
import torch
import llama_cpp
from llama_index import SimpleDirectoryReader
from llama_index import Document
from llama_index import VectorStoreIndex
from llama_index import ServiceContext
from llama_index import LLMPredictor
# Paramas
llama = True
### Get d... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.llama_utils.completion_to_prompt",
"llama_index.llms.Replicate"
] | [((1239, 1307), 'huggingface_hub.hf_hub_download', 'hf_hub_download', ([], {'repo_id': 'model_name_or_path', 'filename': 'model_basename'}), '(repo_id=model_name_or_path, filename=model_basename)\n', (1254, 1307), False, 'from huggingface_hub import hf_hub_download\n'), ((2628, 2799), 'llama_index.llms.Replicate', 'Rep... |
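
This row routes a Replicate-hosted Llama 2 model through llama-index's llama_utils prompt formatters. A sketch of that wiring, assuming a legacy llama_index version that ships llama_index.llms.Replicate; the model slug is a placeholder, not the one the truncated row used:

from llama_index.llms import Replicate
from llama_index.llms.llama_utils import (
    completion_to_prompt,
    messages_to_prompt,
)

llm = Replicate(
    model="meta/llama-2-7b-chat:<version-hash>",  # placeholder slug
    temperature=0.1,
    messages_to_prompt=messages_to_prompt,      # format chat turns for Llama 2
    completion_to_prompt=completion_to_prompt,  # wrap plain completions likewise
)
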
"""Chat service module."""
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from app.api.database.models.message import MessageCreateModel
from app.api.services.message_service import MessageService
from app.api.services.ingest_service import i... | [
"llama_index.core.memory.ChatMemoryBuffer.from_defaults",
"llama_index.core.base.llms.types.ChatMessage"
] | [((532, 548), 'app.api.services.message_service.MessageService', 'MessageService', ([], {}), '()\n', (546, 548), False, 'from app.api.services.message_service import MessageService\n'), ((655, 746), 'app.api.services.ingest_service.ingest_service.index.as_query_engine', 'ingest_service.index.as_query_engine', ([], {'si... |
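
The chat-service row relies on ChatMemoryBuffer to carry conversation history. A minimal sketch of that memory object in isolation, assuming llama-index-core (the token limit is an arbitrary example value):

from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.memory import ChatMemoryBuffer

memory = ChatMemoryBuffer.from_defaults(token_limit=1500)  # example budget
memory.put(ChatMessage(role=MessageRole.USER, content="hello"))
print(memory.get())  # chat history trimmed to the token budget
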
"""FastAPI app creation, logger configuration and main API routes."""
import sys
from typing import Any
import llama_index
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from loguru import logger
from private_gpt.paths import docs_path
from private_gpt.server.chat.chat_router import chat_ro... | [
"llama_index.set_global_handler"
] | [((774, 790), 'loguru.logger.remove', 'logger.remove', (['(0)'], {}), '(0)\n', (787, 790), False, 'from loguru import logger\n'), ((897, 1147), 'loguru.logger.add', 'logger.add', (['sys.stdout'], {'level': '"""INFO"""', 'format': '"""<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{na... |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
from pathlib import Path
data_dir... | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.objects.ObjectIndex.from_objects",
"llama_index.core.load_index_from_storage",
"llama_index.core.objects.SQLTableSchema",
"llama_index.core.query_pipeline.FnComponent",
"llama_index.core.prompts.default_prompts.DEFAULT_TEXT_TO_SQL_PROMPT.p... | [((323, 363), 'pathlib.Path', 'Path', (['"""./WikiTableQuestions/csv/200-csv"""'], {}), "('./WikiTableQuestions/csv/200-csv')\n", (327, 363), False, 'from pathlib import Path\n'), ((3874, 3909), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///:memory:"""'], {}), "('sqlite:///:memory:')\n", (3887, 3909), Fa... |
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
g... | [
"llama_index.core.query_engine.NLSQLTableQueryEngine",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.agent.QueryPipelineAgentWorker",
"llama_index.core.agent.react.output_parser.ReActOutputParser",
"llama_index.core.query_pipeline.ToolRunnerComponent",
"llama_index.core.PromptTemplate",
... | [((183, 220), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///chinook.db"""'], {}), "('sqlite:///chinook.db')\n", (196, 220), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, select, column\n'), ((236, 255), 'llama_index.core.SQLDatabase', 'SQLDatabase', (['engine'], ... |
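
Given the sql_database this row builds over chinook.db, a text-to-SQL engine is a single constructor call. A sketch, assuming llama-index-core and a configured LLM; the table name is a guess at the chinook schema, not taken from the row:

from llama_index.core.query_engine import NLSQLTableQueryEngine

# reuses the sql_database defined in the row above
query_engine = NLSQLTableQueryEngine(
    sql_database=sql_database,
    tables=["albums"],  # placeholder table name
)
print(query_engine.query("Which artist released the most albums?"))  # illustrative
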
import requests
import pandas as pd
from bs4 import BeautifulSoup
import os
from llama_index import SimpleDirectoryReader,GPTListIndex,GPTVectorStoreIndex,LLMPredictor,PromptHelper,ServiceContext,StorageContext
from langchain import OpenAI
import openai
import llama_index
# from main import secret_key
# with open('ke... | [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.load_index_from_storage",
"llama_index.PromptHelper"
] | [((490, 501), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (499, 501), False, 'import os\n'), ((1741, 1790), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'json': 'payload'}), '(url, headers=headers, json=payload)\n', (1754, 1790), False, 'import requests\n'), ((2167, 2211), 'os.path.join', 'os.path... |
"""FastAPI app creation, logger configuration and main API routes."""
from typing import Any
import llama_index
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from private_gpt.paths import docs_path
from private_gpt.server.chat.chat_router import chat_router
from private_gpt.server.chunks.c... | [
"llama_index.set_global_handler"
] | [((735, 775), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (765, 775), False, 'import llama_index\n'), ((2313, 2322), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (2320, 2322), False, 'from fastapi import FastAPI\n'), ((2447, 3013), 'fastapi.openapi.util... |
import llama_index
from .di import global_injector
from .launcher import create_app
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((86, 126), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (116, 126), False, 'import llama_index\n')] |
import logging
from dataclasses import dataclass
from typing import List, Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolv... | [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.P... | [((1018, 1045), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1035, 1045), False, 'import logging\n'), ((1820, 1877), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata... |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from auth_RAG.di import global_injector
from auth_RAG.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((211, 251), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (241, 251), False, 'import llama_index\n'), ((259, 286), 'auth_RAG.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (269, 286), False, 'from auth_RAG.launcher im... |
import llama_index
from pydantic import BaseModel
from typing import List
from typing import Optional
class Section(BaseModel):
section_id: str
section_text: str
vector_representation: Optional[List[float]]
keywords: Optional[List[str]]
named_entities: Optional[List[str]]
summary: Optional[str... | [
"llama_index.Document"
] | [((918, 1146), 'llama_index.Document', 'llama_index.Document', ([], {'text': "(self.section_text or '')", 'doc_id': "(f'{self.document_id}-{self.section_id}' if self.document_id and self.\n section_id else '')", 'extra_info': 'extra_info', 'embedding': '(self.vector_representation or [])'}), "(text=self.section_text... |
## create graph
from pyvis.network import Network
import llama_index.core
from llama_index.core import StorageContext, load_index_from_storage
storage_context = StorageContext.from_defaults(persist_dir="math_index_persist")
index = load_index_from_storage(storage_context)
# retriever = llama_index.core.indices.knowled... | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage"
] | [((162, 224), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""math_index_persist"""'}), "(persist_dir='math_index_persist')\n", (190, 224), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((233, 273), 'llama_index.core.load_inde... |
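
The commented-out retriever import suggests the persisted index is a knowledge graph. Under that assumption, the pyvis import this row makes can render it; a sketch, assuming the index loaded above is a KnowledgeGraphIndex exposing a networkx view:

from pyvis.network import Network

g = index.get_networkx_graph()  # assumes a KnowledgeGraphIndex was persisted
net = Network(directed=True)
net.from_nx(g)
net.show("math_graph.html")  # writes an interactive HTML rendering
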
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, List, Optional
if TYPE_CHECKING:
from llama_index import ServiceContext
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager
from llama_in... | [
"llama_index.core.embeddings.utils.resolve_embed_model",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.set_global_handler",
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.core.utils.get_tokenizer",
... | [((1680, 1696), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (1691, 1696), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2626, 2658), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)... |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.... | [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5360, 5393), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5374, 5393), False, 'import os\n'), ((7213, 7281), 'llama_index.download.utils.ini... |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# ht... | [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.LLMPredictor",
"llama_index.SimpleDirectoryReader"
] | [((1791, 1843), 'nemoguardrails.RailsConfig.from_content', 'RailsConfig.from_content', (['COLANG_CONFIG', 'YAML_CONFIG'], {}), '(COLANG_CONFIG, YAML_CONFIG)\n', (1815, 1843), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((1854, 1870), 'nemoguardrails.LLMRails', 'LLMRails', (['config'], {}), '(config)\n... |
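
This NVIDIA row instantiates NeMo Guardrails from inline Colang and YAML strings. A minimal sketch of that flow, assuming nemoguardrails is installed; COLANG_CONFIG and YAML_CONFIG stand for the row's truncated inline configs and are not reproduced here:

from nemoguardrails import LLMRails, RailsConfig

# COLANG_CONFIG / YAML_CONFIG are the row's truncated inline config strings
config = RailsConfig.from_content(COLANG_CONFIG, YAML_CONFIG)
rails = LLMRails(config)
# typical usage once configured:
# reply = rails.generate(messages=[{"role": "user", "content": "Hello"}])
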
import logging
from dataclasses import dataclass
from typing import Optional, Union
import llama_index
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.prompt_helper import Pro... | [
"llama_index.logger.LlamaLogger",
"llama_index.callbacks.base.CallbackManager",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((809, 836), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (826, 836), False, 'import logging\n'), ((1067, 1189), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callb... |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end to end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import loggin... | [
"llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.schema.TextNode",
"llama_index.response.schema.Response",
"llama_index.indices.query.schema.QueryBundle"
] | [((1046, 1073), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1063, 1073), False, 'import logging\n'), ((2734, 2767), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2765, 2767), True, 'impor... |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-rag-fusion-query-pipeline')
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul... | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI"
] | [((432, 483), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['pg_essay.txt']"}), "(input_files=['pg_essay.txt'])\n", (453, 483), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((535, 550), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (548, 550), Tr... |
import uvicorn
import os
import logging
import llama_index
from typing import cast
from pathlib import Path
from fastapi.middleware.cors import CORSMiddleware
from fastapi import FastAPI
from dotenv import load_dotenv
from contextlib import asynccontextmanager
from firebase_admin import credentials, initialize_app
fro... | [
"llama_index.set_global_handler"
] | [((620, 633), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (631, 633), False, 'from dotenv import load_dotenv\n'), ((641, 651), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (649, 651), False, 'from pathlib import Path\n'), ((705, 736), 'os.getenv', 'os.getenv', (['"""ENVIRONMENT"""', '"""dev"""'], {}), "... |
#!/usr/bin/env python
import os, sys
print("[INFO] Python", sys.version)
if "VIRTUAL_ENV" in os.environ:
print("[INFO] venv:", os.environ["VIRTUAL_ENV"])
if sys.version_info.major != 3 or sys.version_info.minor not in (8,9,10,11):
print("[WARNING] Unsupported python version!")
print("[INFO] Testing imports...... | [
"llama_index.ServiceContext.from_defaults"
] | [((644, 775), 'llama_index.ServiceContext.from_defaults', 'llama_index.ServiceContext.from_defaults', ([], {'embed_model': '"""local:sentence-transformers/all-minilm-l6-v2"""', 'chunk_size': '(256)', 'llm': 'None'}), "(embed_model=\n 'local:sentence-transformers/all-minilm-l6-v2', chunk_size=256, llm=None)\n", (684,... |
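
The smoke-test row builds an LLM-free ServiceContext around a local sentence-transformers embedder, taken verbatim from its extract tuple. A sketch of putting that context to work for retrieval-only indexing, assuming the legacy ServiceContext API:

import llama_index
from llama_index import Document, VectorStoreIndex

service_context = llama_index.ServiceContext.from_defaults(
    embed_model="local:sentence-transformers/all-minilm-l6-v2",
    chunk_size=256,
    llm=None,  # retrieval only: no LLM is needed to embed or retrieve
)
index = VectorStoreIndex.from_documents(
    [Document(text="hello world")], service_context=service_context
)
nodes = index.as_retriever().retrieve("hello")  # pure embedding lookup
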
import logging
from dataclasses import dataclass
from typing import List, Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolv... | [
"llama_index.llm_predictor.loading.load_predictor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.logger.LlamaLogger",
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.P... | [((1019, 1046), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1036, 1046), False, 'import logging\n'), ((1821, 1878), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata... |
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast
from deprecated import deprecated
import llama_index.core
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.base.embeddings.base import B... | [
"llama_index.core.embeddings.utils.resolve_embed_model",
"llama_index.core.node_parser.loading.load_parser",
"llama_index.core.extractors.loading.load_extractor",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.core.s... | [((1132, 1159), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1149, 1159), False, 'import logging\n'), ((1934, 1991), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_met... |
import itertools
import logging
from os import path
from typing import List, Sequence
import llama_index.vector_stores
import ray
from kfp import compiler, dsl
from langchain.embeddings.fake import FakeEmbeddings
from llama_index import ServiceContext, StorageContext, VectorStoreIndex
from llama_index.data_structs imp... | [
"llama_index.data_structs.IndexDict",
"llama_index.llms.MockLLM",
"llama_index.StorageContext.from_defaults"
] | [((397, 436), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (416, 436), False, 'import logging\n'), ((4202, 4501), 'kfp.dsl.component', 'dsl.component', ([], {'target_image': '"""us-central1-docker.pkg.dev/kflow-artifacts/kfp-components/kfp-vectorize-dataset:la... |
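
This KFP row swaps in MockLLM and langchain's FakeEmbeddings so the vectorization component can run without external services. A minimal sketch of that offline harness, assuming a legacy llama_index that accepts langchain embeddings (the embedding size is an arbitrary example):

from langchain.embeddings.fake import FakeEmbeddings
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.llms import MockLLM

service_context = ServiceContext.from_defaults(
    llm=MockLLM(),
    embed_model=FakeEmbeddings(size=384),  # example dimensionality
)
index = VectorStoreIndex.from_documents(
    [Document(text="offline test doc")], service_context=service_context
)
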
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# ht... | [
"llama_index.callbacks.base.CallbackManager"
] | [((1536, 1583), 'opentelemetry.sdk.resources.Resource.create', 'Resource.create', (["{SERVICE_NAME: 'chain-server'}"], {}), "({SERVICE_NAME: 'chain-server'})\n", (1551, 1583), False, 'from opentelemetry.sdk.resources import SERVICE_NAME, Resource\n'), ((1595, 1628), 'opentelemetry.sdk.trace.TracerProvider', 'TracerProv... |
import json
import os
import time
import fitz # PyMuPDF
import llama_index
import openai
import weaviate
from weaviate.gql.get import HybridFusion
from unstructured.cleaners.core import clean
from llama_index.vector_stores import WeaviateVectorStore
from llama_index import VectorStoreIndex, ServiceContext, set_global_... | [
"llama_index.llms.AzureOpenAI",
"llama_index.embeddings.AzureOpenAIEmbedding",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.response.pprint_utils.pprint_source_node",
"llama_index.vector_stores.WeaviateVectorStore"
] | [((707, 773), 'langchain.document_loaders.GutenbergLoader', 'GutenbergLoader', (['"""https://www.gutenberg.org/files/2591/2591-0.txt"""'], {}), "('https://www.gutenberg.org/files/2591/2591-0.txt')\n", (722, 773), False, 'from langchain.document_loaders import GutenbergLoader\n'), ((817, 892), 'langchain.text_splitter.C... |
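
The Weaviate row points llama-index at an existing Weaviate collection via VectorStoreIndex.from_vector_store. A sketch of that attachment, assuming a legacy llama_index and weaviate-client v3 (the URL and index name are placeholders):

import weaviate
from llama_index import VectorStoreIndex
from llama_index.vector_stores import WeaviateVectorStore

client = weaviate.Client("http://localhost:8080")  # placeholder URL (v3 client)
vector_store = WeaviateVectorStore(weaviate_client=client, index_name="Documents")
index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = index.as_query_engine()
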
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
... | [
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.vector_stores.utils.node_to_metadata_dict"
] | [((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3728, 3... |
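
The Elasticsearch store in this row round-trips nodes through metadata_dict_to_node / node_to_metadata_dict when writing and reading documents. A minimal sketch of that serialization pair on its own, using the same legacy import paths the row shows:

from llama_index.schema import TextNode
from llama_index.vector_stores.utils import (
    metadata_dict_to_node,
    node_to_metadata_dict,
)

node = TextNode(text="hello", metadata={"source": "example.txt"})
flat = node_to_metadata_dict(node)      # dict suitable for an ES document's metadata
restored = metadata_dict_to_node(flat)  # rebuilds the TextNode from a search hit
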