Skip to content

Commit bb1700a

Browse files
Annieyun/clean (#3)
Co-authored-by: Alon Ragoler <alan.ragoler@gmail.com>
1 parent 187684c commit bb1700a

File tree

3 files changed

+20
-61
lines changed

3 files changed

+20
-61
lines changed
Lines changed: 1 addition & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -1,53 +1,5 @@
1-
"""
2-
Dummy GKE workflow job.
31

4-
Students can replace this module with real agent logic while keeping the same container
5-
+ Kubernetes deployment plumbing.
6-
"""
7-
8-
from __future__ import annotations
9-
10-
import json
11-
import os
12-
import sys
13-
import time
14-
import uuid
15-
from datetime import UTC, datetime
16-
17-
18-
def _now_iso() -> str:
19-
return datetime.now(UTC).isoformat()
20-
21-
22-
def _log(event: str, **fields: object) -> None:
23-
payload = {"ts": _now_iso(), "event": event, **fields}
24-
print(json.dumps(payload), flush=True)
25-
26-
27-
def run() -> int:
28-
run_id = str(uuid.uuid4())
29-
loop_count = int(os.getenv("DUMMY_LOOP_COUNT", "5"))
30-
sleep_seconds = float(os.getenv("DUMMY_SLEEP_SECONDS", "3"))
31-
fail_on_purpose = os.getenv("DUMMY_FAIL", "false").lower() == "true"
32-
33-
_log(
34-
"dummy_job_started",
35-
run_id=run_id,
36-
loop_count=loop_count,
37-
sleep_seconds=sleep_seconds,
38-
)
39-
40-
for step in range(1, loop_count + 1):
41-
_log("dummy_job_step", run_id=run_id, step=step, total=loop_count)
42-
time.sleep(sleep_seconds)
43-
44-
if fail_on_purpose:
45-
_log("dummy_job_failed", run_id=run_id, reason="DUMMY_FAIL=true")
46-
return 1
47-
48-
_log("dummy_job_completed", run_id=run_id)
49-
return 0
502

513

524
if __name__ == "__main__":
53-
sys.exit(run())
5+
sys.exit(run())

sp26_gke/workflows/prompts.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,8 @@
88
from __future__ import annotations
99

1010
# ---------------------------------------------------------------------------
11-
# Node 1 — Research: generate targeted search queries
11+
# Node 1 — research_topic
12+
# this node uses Tavily to search for data from X
1213
# ---------------------------------------------------------------------------
1314
SEARCH_QUERIES_PROMPT = """\
1415
You are a social media research assistant. Your job is to generate exactly 3 \
@@ -32,7 +33,8 @@
3233
"""
3334

3435
# ---------------------------------------------------------------------------
35-
# Node 2 — Analyze: extract sentiment from search results
36+
# Node 2 — analyze_sentiment
37+
# This node uses the LLM of choice (currently OpenAI; see sentiment_agent.py) to analyze raw results from node 1
3638
# ---------------------------------------------------------------------------
3739
SENTIMENT_ANALYSIS_PROMPT = """\
3840
You are an expert social media sentiment analyst. Below are raw search results \
@@ -78,7 +80,8 @@
7880
"""
7981

8082
# ---------------------------------------------------------------------------
81-
# Node 3 — Report: generate a polished, human-readable sentiment report
83+
# Node 3 — generate_report
84+
# This node uses the LLM of choice (currently OpenAI; see sentiment_agent.py) to produce a structured sentiment report
8285
# ---------------------------------------------------------------------------
8386
REPORT_GENERATION_PROMPT = """\
8487
You are a senior social media intelligence analyst producing a sentiment \

sp26_gke/workflows/sentiment_agent.py

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,20 @@
11
"""
2-
LangGraph sentiment analysis agent for X / social media.
2+
LangGraph sentiment analysis agent for X.
33
4-
Runs a 3-node pipeline:
5-
1. research_topic — search X/social media via Tavily
6-
2. analyze_sentiment — Gemini analyses raw results
7-
3. generate_report — Gemini produces a structured sentiment report
4+
Runs a 3-node Graph API using LangGraph:
5+
Node 1. research_topic — uses Tavily to search X
6+
Node 2. analyze_sentiment — LLM analyses raw results
7+
Node 3. generate_report — LLM produces a structured sentiment report
88
9-
Usage (local):
10-
GEMINI_API_KEY=... TAVILY_API_KEY=... pixi run sentiment-agent
9+
Current Notes:
10+
- currently using OpenAI but planning to switch to Gemini once we have credits
11+
- right now the prompt is hard-coded (see DEFAULT_TOPIC); will switch to a more usable UI that allows custom prompts
12+
- as of now, topic can be overridden with the SENTIMENT_TOPIC environment variable
13+
- currently does not support cronjob
1114
12-
The topic defaults to "latest hot topics in AI" but can be overridden
13-
with the SENTIMENT_TOPIC environment variable.
15+
Usage (local):
16+
GEMINI_API_KEY=... TAVILY_API_KEY=...
17+
pixi run sentiment-agent
1418
"""
1519

1620
from __future__ import annotations

0 commit comments

Comments
 (0)