Implementation of an IWE Content Bridge as an AI-Powered Knowledge Graph with Agentic RAG, OpenAI Function Calling, and Graph Traversal

In this lesson, we use IWE: an open-source, scalable personal information management system that manages markdown notes as a linked knowledge graph. Since IWE is a CLI/LSP tool designed for local developers, we build a virtual developer's knowledge base from scratch, connect wiki-links and markdown links into a directed graph, and walk through all the major functions of IWE: full-text search, context-aware retrieval, tree-based hierarchy visualization, document squash/merge, knowledge-base statistics, and DOT graph export for visualization. Then we go beyond the CLI by integrating OpenAI to enable IWE-style AI transformations — summarization, link suggestion, and todo extraction — directly against our knowledge graph. Finally, we build a complete RAG pipeline where an AI agent navigates the graph using tool calling, performs multi-hop reasoning across linked documents, identifies knowledge gaps, and generates new notes that fit into the existing structure.
import subprocess, sys
def _install(pkg):
subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", pkg])
_install("openai")
_install("graphviz")
import re, json, textwrap, os, getpass
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Optional
from datetime import datetime
try:
from google.colab import userdata
OPENAI_API_KEY = userdata.get("OPENAI_API_KEY")
if not OPENAI_API_KEY:
raise ValueError
print("β
Loaded OPENAI_API_KEY from Colab secrets.")
except Exception:
OPENAI_API_KEY = getpass.getpass("π Enter your OpenAI API key: ")
print("β
API key received.")
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from openai import OpenAI
client = OpenAI(api_key=OPENAI_API_KEY)
print("n" + "=" * 72)
print(" IWE Advanced Tutorial β Knowledge Graph + AI Agents")
print("=" * 72)
@dataclass
class Section:
level: int
title: str
content: str
children: list = field(default_factory=list)
@dataclass
class Document:
key: str
title: str
raw_content: str
sections: list = field(default_factory=list)
outgoing_links: list = field(default_factory=list)
tags: list = field(default_factory=list)
created: str = ""
modified: str = ""
class KnowledgeGraph:
def __init__(self):
self.documents: dict[str, Document] = {}
self.backlinks: dict[str, set] = defaultdict(set)
_WIKI_LINK = re.compile(r"[[([^]|]+)(?:|([^]]+))?]]")
_MD_LINK = re.compile(r"[([^]]+)](([^)]+))")
_HEADER = re.compile(r"^(#{1,6})s+(.+)", re.MULTILINE)
_TAG = re.compile(r"#([a-zA-Z][w/-]*)")
def _extract_links(self, text: str) -> list[str]:
links = []
for match in self._WIKI_LINK.finditer(text):
links.append(match.group(1).strip())
for match in self._MD_LINK.finditer(text):
target = match.group(2).strip()
if not target.startswith("http"):
target = target.replace(".md", "")
links.append(target)
return links
def _parse_sections(self, text: str) -> list[Section]:
sections = []
parts = self._HEADER.split(text)
i = 1
while i < len(parts) - 1:
level = len(parts[i])
title = parts[i + 1].strip()
body = parts[i + 2] if i + 2 < len(parts) else ""
sections.append(Section(level=level, title=title, content=body.strip()))
i += 3
return sections
def _extract_tags(self, text: str) -> list[str]:
tags = set()
for line in text.split("n"):
if line.strip().startswith("#") and " " in line.strip():
stripped = re.sub(r"^#{1,6}s+.*", "", line)
for m in self._TAG.finditer(stripped):
tags.add(m.group(1))
else:
for m in self._TAG.finditer(line):
tags.add(m.group(1))
return sorted(tags)
def add_document(self, key: str, content: str) -> Document:
sections = self._parse_sections(content)
title = sections[0].title if sections else key
links = self._extract_links(content)
tags = self._extract_tags(content)
now = datetime.now().strftime("%Y-%m-%d %H:%M")
doc = Document(
key=key, title=title, raw_content=content,
sections=sections, outgoing_links=links, tags=tags,
created=now, modified=now,
)
self.documents[key] = doc
for target in links:
self.backlinks[target].add(key)
return doc
def get(self, key: str) -> Optional[Document]:
return self.documents.get(key)
def find(self, query: str, roots_only: bool = False, limit: int = 10) -> list[str]:
q = query.lower()
scored = []
for key, doc in self.documents.items():
score = 0
if q in doc.title.lower():
score += 10
if q in doc.raw_content.lower():
score += doc.raw_content.lower().count(q)
if q in key.lower():
score += 5
for tag in doc.tags:
if q in tag.lower():
score += 3
if score > 0:
scored.append((key, score))
scored.sort(key=lambda x: -x[1])
results = [k for k, _ in scored[:limit]]
if roots_only:
results = [k for k in results if not self.backlinks.get(k)]
return results
def retrieve(self, key: str, depth: int = 1, context: int = 1,
exclude: set = None) -> str:
exclude = exclude or set()
parts = []
if context > 0:
parents_of = list(self.backlinks.get(key, set()) - exclude)
for p in parents_of[:context]:
pdoc = self.get(p)
if pdoc:
parts.append(f"[CONTEXT: {pdoc.title}]n{pdoc.raw_content[:300]}...n")
exclude.add(p)
doc = self.get(key)
if not doc:
return f"β Document '{key}' not found."
parts.append(doc.raw_content)
exclude.add(key)
if depth > 0:
for link in doc.outgoing_links:
if link not in exclude:
child = self.get(link)
if child:
parts.append(f"n---n[LINKED: {child.title}]n")
parts.append(
self.retrieve(link, depth=depth - 1,
context=0, exclude=exclude)
)
return "n".join(parts)
def tree(self, key: str, indent: int = 0, _visited: set = None) -> str:
_visited = _visited if _visited is not None else set()
doc = self.get(key)
if not doc:
return ""
prefix = " " * indent + ("ββ " if indent else "")
if key in _visited:
return f"{prefix}{doc.title} ({key}) β© (circular ref)"
_visited.add(key)
lines = [f"{prefix}{doc.title} ({key})"]
for link in doc.outgoing_links:
if self.get(link):
lines.append(self.tree(link, indent + 1, _visited))
return "n".join(lines)
def squash(self, key: str, visited: set = None) -> str:
visited = visited or set()
doc = self.get(key)
if not doc or key in visited:
return ""
visited.add(key)
parts = [doc.raw_content]
for link in doc.outgoing_links:
child_content = self.squash(link, visited)
if child_content:
parts.append(f"n{'β' * 40}n")
parts.append(child_content)
return "n".join(parts)
def stats(self) -> dict:
total_words = sum(len(d.raw_content.split()) for d in self.documents.values())
total_links = sum(len(d.outgoing_links) for d in self.documents.values())
orphans = [k for k in self.documents if not self.backlinks.get(k)
and not self.documents[k].outgoing_links]
all_tags = set()
for d in self.documents.values():
all_tags.update(d.tags)
return {
"total_documents": len(self.documents),
"total_words": total_words,
"total_links": total_links,
"unique_tags": len(all_tags),
"tags": sorted(all_tags),
"orphan_notes": orphans,
"avg_words_per_doc": total_words // max(len(self.documents), 1),
}
def export_dot(self, highlight_key: str = None) -> str:
lines = ['digraph KnowledgeGraph {',
' rankdir=LR;',
' node [shape=box, style="rounded,filled", fillcolor="#f0f4ff", '
'fontname="Helvetica", fontsize=10];',
' edge [color="#666666", arrowsize=0.7];']
for key, doc in self.documents.items():
label = doc.title[:30]
color="#ffe4b5" if highlight_key == key else '#f0f4ff'
lines.append(f' "{key}" [label="{label}", fillcolor="{color}"];')
for key, doc in self.documents.items():
for link in doc.outgoing_links:
if link in self.documents:
lines.append(f' "{key}" -> "{link}";')
lines.append("}")
return "n".join(lines)
print("nβ
Section 1 complete β KnowledgeGraph class defined.n")We install the necessary dependencies, securely accept the OpenAI API key with Colab secrets or password prompts, and launch the OpenAI client. We then define three basic data categories, Category, Document, and Information Graph, which mirror the IWE field-based graph structure where each tag file is a node and every link is a directed edge. We use the full range of IWE CLI functions in the KnowledgeGraph class, including markdown parsing of wiki links and topics, implicit search by find, context-aware retrieval by retrieval, tree-safe sequential display, document aggregation by squash, database statistics with statistics, and DOT graph export.
# Section 2: populate the graph with eight interlinked markdown notes.
# "project-index" is the Map of Content (MOC) entry point; every other note
# is reachable from it via wiki-links ([[key]]) or markdown links ([t](key)).
kg = KnowledgeGraph()
# Root MOC node — links out to all major areas.
kg.add_document("project-index", """# Web App Project
This is the **Map of Content** for our web application project.
## Architecture
- [Authentication System](authentication)
- [Database Design](database-design)
- [API Design](api-design)
## Development
- [Frontend Stack](frontend-stack)
- [Deployment Pipeline](deployment)
## Research
- [[caching-strategies]]
- [[performance-notes]]
""")
# Links to database-design and caching-strategies.
kg.add_document("authentication", """# Authentication System
Our app uses **JWT-based authentication** with refresh tokens.
## Flow
1. User submits credentials to `/api/auth/login`
2. Server validates against [Database Design](database-design) user table
3. Returns short-lived access token (15 min) + refresh token (7 days)
4. Client stores refresh token in HTTP-only cookie
## Security Considerations
- Passwords hashed with bcrypt (cost factor 12)
- Rate limiting on login endpoint: 5 attempts / minute
- Refresh token rotation on each use
- See [[caching-strategies]] for session caching
#security #jwt #auth
""")
# Links to performance-notes.
kg.add_document("database-design", """# Database Design
We use **PostgreSQL 16** with the following core tables.
## Users Table
```sql
CREATE TABLE users (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
email VARCHAR(255) UNIQUE NOT NULL,
password VARCHAR(255) NOT NULL,
created_at TIMESTAMPTZ DEFAULT NOW()
);
```
## Sessions Table
```sql
CREATE TABLE sessions (
id UUID PRIMARY KEY,
user_id UUID REFERENCES users(id),
token_hash VARCHAR(255) NOT NULL,
expires_at TIMESTAMPTZ NOT NULL
);
```
## Indexing Strategy
- B-tree on `users.email` for login lookups
- B-tree on `sessions.token_hash` for token validation
- See [[performance-notes]] for query optimization
#database #postgresql #schema
""")
# Links to authentication and database-design.
kg.add_document("api-design", """# API Design
RESTful API following OpenAPI 3.0 specification.
## Endpoints
| Method | Path | Description |
|--------|------|-------------|
| POST | /api/auth/login | Authenticate user |
| POST | /api/auth/refresh | Refresh access token |
| GET | /api/users/me | Get current user profile |
| PUT | /api/users/me | Update profile |
## Error Handling
All errors return JSON with `{ "error": "code", "message": "..." }`.
Authentication endpoints documented in [Authentication System](authentication).
Data models align with [Database Design](database-design).
#api #rest #openapi
""")
# Links to api-design.
kg.add_document("frontend-stack", """# Frontend Stack
## Technology Choices
- **Framework**: React 19 with Server Components
- **Styling**: Tailwind CSS v4
- **State Management**: Zustand for client state
- **Data Fetching**: TanStack Query v5
## Auth Integration
The frontend consumes the [API Design](api-design) endpoints.
Access tokens are stored in memory (not localStorage) for security.
Refresh handled transparently via Axios interceptors.
#frontend #react #tailwind
""")
# Links to performance-notes.
kg.add_document("deployment", """# Deployment Pipeline
## Infrastructure
- **Container Runtime**: Docker with multi-stage builds
- **Orchestration**: Kubernetes on GKE
- **CI/CD**: GitHub Actions β Google Artifact Registry β GKE
## Pipeline Stages
1. Lint & type-check
2. Unit tests (Jest + pytest)
3. Build Docker images
4. Push to Artifact Registry
5. Deploy to staging (auto)
6. Deploy to production (manual approval)
## Monitoring
- Prometheus + Grafana for metrics
- Structured logging with correlation IDs
- See [[performance-notes]] for SLOs
#devops #kubernetes #cicd
""")
# Links back to authentication (creates a cycle with it).
kg.add_document("caching-strategies", """# Caching Strategies
## Application-Level Caching
- **Redis** for session storage and rate limiting
- Cache-aside pattern for frequently accessed user profiles
- TTL: 5 minutes for profiles, 15 minutes for config
## HTTP Caching
- `Cache-Control: private, max-age=0` for authenticated endpoints
- `Cache-Control: public, max-age=3600` for static assets
- ETag support for conditional requests
## Cache Invalidation
- Event-driven invalidation via pub/sub
- Versioned cache keys: `user:{id}:v{version}`
Related: [Authentication System](authentication) uses Redis for refresh tokens.
#caching #redis #performance
""")
# Links to database-design and caching-strategies.
kg.add_document("performance-notes", """# Performance Notes
## Database Query Optimization
- Use `EXPLAIN ANALYZE` before deploying new queries
- Connection pooling with PgBouncer (max 50 connections)
- Avoid N+1 queries β use JOINs or DataLoader pattern
## SLO Targets
| Metric | Target | Current |
|--------|--------|---------|
| p99 latency | < 200ms | 180ms |
| Availability | 99.9% | 99.95% |
| Error rate | < 0.1% | 0.05% |
## Load Testing
- k6 scripts in `/tests/load/`
- Baseline: 1000 RPS sustained
- Spike: 5000 RPS for 60 seconds
Related to [Database Design](database-design) indexing and [[caching-strategies]].
#performance #slo #monitoring
""")
print("β
Section 2 complete β 8 documents loaded into knowledge graph.n")
print("β" * 72)
print(" 3A Β· iwe find β Search the Knowledge Graph")
print("β" * 72)
results = kg.find("authentication")
print(f"nπ find('authentication'): {results}")
results = kg.find("performance")
print(f"π find('performance'): {results}")
results = kg.find("cache", roots_only=True)
print(f"π find('cache', roots_only=True): {results}")
print("n" + "β" * 72)
print(" 3B Β· iwe tree β Document Hierarchy")
print("β" * 72)
print()
print(kg.tree("project-index"))
print("n" + "β" * 72)
print(" 3C Β· iwe stats β Knowledge Base Statistics")
print("β" * 72)
stats = kg.stats()
for k, v in stats.items():
print(f" {k:>25s}: {v}")
print("n" + "β" * 72)
print(" 3D Β· iwe retrieve β Context-Aware Retrieval")
print("β" * 72)
print("nπ Retrieving 'authentication' with depth=1, context=1:n")
retrieved = kg.retrieve("authentication", depth=1, context=1)
print(retrieved[:800] + "n... (truncated)")
print("n" + "β" * 72)
print(" 3E Β· iwe squash β Combine Documents")
print("β" * 72)
squashed = kg.squash("project-index")
print(f"nπ Squashed 'project-index': {len(squashed)} characters, "
f"{len(squashed.split())} words")
print("n" + "β" * 72)
print(" 3F Β· iwe export dot β Graph Visualization")
print("β" * 72)
dot_output = kg.export_dot(highlight_key="project-index")
print(f"nπ¨ DOT output ({len(dot_output)} chars):n")
print(dot_output[:500] + "n...")
try:
import graphviz
src = graphviz.Source(dot_output)
src.render("knowledge_graph", format="png", cleanup=True)
print("nβ
Graph rendered to 'knowledge_graph.png'")
try:
from IPython.display import Image, display
display(Image("knowledge_graph.png"))
except ImportError:
print(" (Run in Colab/Jupyter to see the image inline)")
except Exception as e:
print(f" β Graphviz rendering skipped: {e}")
print("nβ
Section 3 complete β all graph operations demonstrated.n")We anchor the KnowledgeGraph and populate it with eight linked markup documents that form a virtual developer knowledge base, extreme authentication, database architecture, API design, frontend, deployment, caching, and performance, all organized under the Content Map entry, just as we can organize notes in IWE. We then apply all graph operations against this knowledge base: we search with find, display the complete document sequence with a tree, extract statistics with statistics, perform context-aware retrieval that follows links by retrieval, merge the entire graph into a single document with squash, and output the structure as a DOT graph. We render the graph visually using Graphviz and display it in line, giving us a clear picture of how all our notes connect to each other.
print("β" * 72)
print(" 4 Β· AI-Powered Document Transforms")
print("β" * 72)
def ai_transform(text: str, action: str, context: str = "",
model: str = "gpt-4o-mini") -> str:
prompts = {
"rewrite": (
"Rewrite the following text to improve clarity and readability. "
"Keep the markdown formatting. Return ONLY the rewritten text."
),
"summarize": (
"Summarize the following text in 2-3 concise bullet points. "
"Focus on the key decisions and technical choices."
),
"expand": (
"Expand the following text with more technical detail and examples. "
"Keep the same structure and add depth."
),
"extract_todos": (
"Extract all actionable items from this text and format them as "
"a markdown todo list. If there are no actionable items, suggest "
"relevant next steps based on the content."
),
"generate_links": (
"Analyze the following note and suggest related topics that should "
"be linked. Format as a markdown list of wiki-links: [[topic-name]]. "
"Only suggest topics that are genuinely related."
),
}
system_msg = prompts.get(action, prompts["rewrite"])
if context:
system_msg += f"nnDocument context:n{context[:500]}"
messages = [
{"role": "system", "content": system_msg},
{"role": "user", "content": text},
]
response = client.chat.completions.create(
model=model, messages=messages, temperature=0.3, max_tokens=1000,
)
return response.choices[0].message.content.strip()
auth_doc = kg.get("authentication")
print("nπ Transform: SUMMARIZE β Authentication Systemn")
summary = ai_transform(auth_doc.raw_content, "summarize")
print(summary)
print("nnπ Transform: GENERATE_LINKS β Authentication Systemn")
links = ai_transform(auth_doc.raw_content, "generate_links")
print(links)
print("nnβ
Transform: EXTRACT_TODOS β Performance Notesn")
perf_doc = kg.get("performance-notes")
todos = ai_transform(perf_doc.raw_content, "extract_todos")
print(todos)
print("nβ
Section 4 complete β AI transforms demonstrated.n")We define an ai_transform function similar to the IWE action system of config.toml, which supports five types of transformations: rewrite, shorten, expand, remove_todos, and generic_links, each of which is supported by the designed system information sent to OpenAI. We do three live demos against our knowledge base: we summarize the Validation System document into short bullet points, analyze it to find suggested wiki links to related topics, and extract actionable items from the Working Notes document. We see that the IWE action pattern of AI, selecting a document, selecting a transformation, and applying it to a field, translates directly into a reusable Python function that works with any node in our graph.
print("β" * 72)
print(" 5 Β· Agentic RAG β AI Navigates Your Knowledge Graph")
print("β" * 72)
# OpenAI function-calling tool schemas: one entry per KnowledgeGraph
# operation the agent may invoke. `execute_tool` below dispatches by name.
AGENT_TOOLS = [
{
"type": "function",
"function": {
"name": "iwe_find",
"description": "Search the knowledge graph for documents matching a query. Returns a list of document keys.",
"parameters": {
"type": "object",
"properties": {
"query": {"type": "string", "description": "Search query"},
"roots_only": {"type": "boolean", "description": "Only return root/MOC documents", "default": False},
},
"required": ["query"],
},
},
},
{
"type": "function",
"function": {
"name": "iwe_retrieve",
"description": "Retrieve a document's content with linked context. Use depth>0 to follow outgoing links, context>0 to include parent documents.",
"parameters": {
"type": "object",
"properties": {
"key": {"type": "string", "description": "Document key to retrieve"},
"depth": {"type": "integer", "description": "How many levels of child links to follow (0-2)", "default": 1},
"context": {"type": "integer", "description": "How many levels of parent context (0-1)", "default": 0},
},
"required": ["key"],
},
},
},
{
"type": "function",
"function": {
"name": "iwe_tree",
"description": "Show the document hierarchy starting from a given key.",
"parameters": {
"type": "object",
"properties": {
"key": {"type": "string", "description": "Root document key"},
},
"required": ["key"],
},
},
},
{
# No parameters: stats always covers the whole knowledge base.
"type": "function",
"function": {
"name": "iwe_stats",
"description": "Get statistics about the entire knowledge base.",
"parameters": {"type": "object", "properties": {}},
},
},
]
def execute_tool(name: str, args: dict) -> str:
if name == "iwe_find":
results = kg.find(args["query"], roots_only=args.get("roots_only", False))
return json.dumps({"results": results})
elif name == "iwe_retrieve":
content = kg.retrieve(
args["key"],
depth=args.get("depth", 1),
context=args.get("context", 0),
)
return content[:3000]
elif name == "iwe_tree":
return kg.tree(args["key"])
elif name == "iwe_stats":
return json.dumps(kg.stats(), indent=2)
return "Unknown tool"
def run_agent(question: str, max_turns: int = 6, model: str = "gpt-4o-mini") -> str:
system_prompt = textwrap.dedent("""
You are an AI assistant with access to a personal knowledge graph (IWE).
Use the provided tools to navigate the graph and answer questions.
Workflow:
1. Use iwe_find to discover relevant documents
2. Use iwe_retrieve to read content (set depth=1 to follow links)
3. Follow relationships to build comprehensive understanding
4. Synthesize information from multiple documents
Be specific and cite which documents you found information in.
If you cannot find enough information, say so clearly.
""")
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": question},
]
for turn in range(max_turns):
response = client.chat.completions.create(
model=model, messages=messages, tools=AGENT_TOOLS,
tool_choice="auto",
)
msg = response.choices[0].message
if msg.tool_calls:
messages.append(msg)
for tc in msg.tool_calls:
fn_name = tc.function.name
fn_args = json.loads(tc.function.arguments)
print(f" π§ Agent calls: {fn_name}({fn_args})")
result = execute_tool(fn_name, fn_args)
messages.append({
"role": "tool",
"tool_call_id": tc.id,
"content": result,
})
else:
return msg.content
return "Agent reached maximum turns without completing."
questions = [
"How does our authentication system work, and what database tables does it depend on?",
"What is our deployment pipeline, and what are the performance SLO targets?",
"Give me a high-level overview of the entire project architecture.",
]
for i, q in enumerate(questions, 1):
print(f"n{'β' * 72}")
print(f" Question {i}: {q}")
print(f"{'β' * 72}n")
answer = run_agent(q)
print(f"nπ‘ Agent Answer:n{answer}n")
print("nβ
Section 5 complete β Agentic RAG demonstrated.n")We are building a full recovery pipeline that includes the IWE concept of βContext Bridgeβ: an AI agent that navigates our knowledge graph using OpenAI calling with four tools: iwe_find for discovery, iwe_retrieve for context-aware content, iwe_tree for category analysis, and iwe_stats for basic knowledge statistics. We connect a tool builder that dispatches each function call to our KnowledgeGraph instance, and uses an agent loop that iterates through search-retrieve-compile cycles until it compiles the complete response. We then answer three complex ongoing demo questions, asking about validation dependencies, deployment and SLO goals, and an overall view of the project structure, and watch the agent drive independently, follow links between documents, and generate complete answers based on our notes.
print("β" * 72)
print(" 6 Β· AI-Powered Knowledge Graph Maintenance")
print("β" * 72)
def analyze_knowledge_gaps(model: str = "gpt-4o-mini") -> str:
stats_info = json.dumps(kg.stats(), indent=2)
titles = [f"- {d.title} ({k}): links to {d.outgoing_links}"
for k, d in kg.documents.items()]
graph_overview = "n".join(titles)
response = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": (
"You are a knowledge management consultant. Analyze this "
"knowledge graph and identify: (1) missing topics that should "
"exist, (2) documents that should be linked but aren't, "
"(3) areas that need more detail. Be specific and actionable."
)},
{"role": "user", "content": (
f"Knowledge base stats:n{stats_info}nn"
f"Document structure:n{graph_overview}"
)},
],
temperature=0.4, max_tokens=1000,
)
return response.choices[0].message.content.strip()
def generate_new_note(topic: str, related_keys: list[str],
model: str = "gpt-4o-mini") -> str:
context_parts = []
for key in related_keys[:3]:
doc = kg.get(key)
if doc:
context_parts.append(f"## {doc.title}n{doc.raw_content[:400]}")
context = "nn".join(context_parts)
response = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": (
"You are a technical writer. Generate a new markdown note "
"about the given topic. Use wiki-links [[like-this]] to "
"reference related existing documents. Include relevant "
"headers, code examples where appropriate, and hashtag tags."
)},
{"role": "user", "content": (
f"Topic: {topic}nn"
f"Related existing notes for context:n{context}nn"
f"Available documents to link to: {list(kg.documents.keys())}"
)},
],
temperature=0.5, max_tokens=1200,
)
return response.choices[0].message.content.strip()
print("nπ Analyzing knowledge gaps...n")
gaps = analyze_knowledge_gaps()
print(gaps)
print("nnπ Generating a new note: 'Error Handling Strategy'...n")
new_note = generate_new_note(
"Error Handling Strategy",
related_keys=["api-design", "authentication", "frontend-stack"],
)
print(new_note[:1000] + "n... (truncated)")
kg.add_document("error-handling", new_note)
print(f"nβ
Added 'error-handling' to knowledge graph. "
f"Total documents: {len(kg.documents)}")
dot_output = kg.export_dot(highlight_key="error-handling")
try:
import graphviz
src = graphviz.Source(dot_output)
src.render("knowledge_graph_v2", format="png", cleanup=True)
print("β
Updated graph rendered to 'knowledge_graph_v2.png'")
try:
from IPython.display import Image, display
display(Image("knowledge_graph_v2.png"))
except ImportError:
pass
except Exception as e:
print(f" β Graphviz rendering skipped: {e}")
print("nβ
Section 6 complete β AI-powered maintenance demonstrated.n")
print("β" * 72)
print(" 7 Β· Multi-Hop Reasoning Across the Knowledge Graph")
print("β" * 72)
complex_question = (
"If we increase our traffic from 1000 RPS to 5000 RPS sustained, "
"what changes would be needed across the entire stack β from database "
"connection pooling, to caching, to authentication token handling, "
"to deployment infrastructure?"
)
print(f"nπ§ Complex multi-hop question:n {complex_question}n")
answer = run_agent(complex_question, max_turns=8)
print(f"nπ‘ Agent Answer:n{answer}")
print("nn" + "=" * 72)
print(" β
TUTORIAL COMPLETE")
print("=" * 72)
print("""
You've explored all the core concepts of IWE:
1. Knowledge Graph β Documents as nodes, links as edges
2. Markdown Parsing β Wiki-links, headers, tags
3. Maps of Content β Hierarchical organisation (MOC)
4. Graph Operations β find, retrieve, tree, squash, stats, export
5. AI Transforms β Rewrite, summarize, expand, extract todos
6. Agentic Retrieval β AI agent navigating your knowledge graph
7. Graph Maintenance β AI-powered gap analysis and note generation
8. Multi-Hop Reasoning β Cross-document synthesis
To use IWE for real (with your editor):
β
β
IWE supports VS Code, Neovim, Zed, and Helix via LSP.
""")We use AI to analyze our knowledge graph to find structural gaps, identify missing topics, disconnected texts, and areas that need more depth. We then automatically generate a new “Error Handling Strategy” note that references existing documentation via wiki links and adds it to the live graph. We also provide an updated Graphviz visualization, highlighting a new area to show how the knowledge base grows organically as AI and human contributions layer on top of each other. We close with the complex thinking challenge of multi-hop, asking what changes are needed in the whole stack if we scale from 1000 to 5000 RPS, where the agent has to cross the database, temporary storage, authentication, and shipping documents in order to put together a distinguishing answer that no single note can provide alone.
In conclusion, we now have a complete, functional implementation of the core concepts of IWE running in a Colab environment. We've seen that treating notes as a graph, instead of as flat files, unlocks powerful capabilities: relationships become navigable paths, context flows naturally from parent to child documents, and AI agents can find, traverse, and synthesize information the way we organize it. We built a complete pipeline — from low-level markdown parsing and backlink indexing to graph traversal, AI-powered document transforms, agentic retrieval via tool calling, knowledge-gap analysis, and multi-hop reasoning that spans the entire knowledge base. Everything built here maps directly onto real IWE features: the find, retrieve, tree, squash, and export commands; config.toml AI actions; and the Context Bridge philosophy of exposing your own knowledge graph as shared memory between you and your AI agents.
Check out the complete notebook here. Also, feel free to follow us on Twitter, and don't forget to join our 120k+ ML SubReddit and subscribe to our newsletter. Are you on Telegram? You can now join us on Telegram as well.
Michal Sutter is a data science expert with a Master of Science in Data Science from the University of Padova. With a strong foundation in statistical analysis, machine learning, and data engineering, Michal excels at turning complex datasets into actionable insights.



