Refactor: use Index instead of Position for all state decisions
## Main changes

### Architecture
- Clear separation of responsibilities between Index and Offset
  - Index: the logical record sequence number, used for state decisions
  - Offset: the physical byte position in the file, used only for I/O

### API changes
- Removed all Position-related methods:
  - `LogCursor.StartPos()/EndPos()`
  - `LogTailer.GetStartPos()/GetEndPos()`
  - `TopicProcessor.GetProcessingPosition()/GetReadPosition()`
  - `Seqlog.GetProcessingPosition()/GetReadPosition()`
- Added index-based methods:
  - `LogCursor.StartIndex()/EndIndex()`
  - `LogTailer.GetStartIndex()/GetEndIndex()`
  - `TopicProcessor.GetProcessingIndex()/GetReadIndex()`
  - `Seqlog.GetProcessingIndex()/GetReadIndex()`
  - `Seqlog.GetProcessor()` - returns the processor instance so its Index can be reached

### Query interface changes
- `RecordQuery.QueryOldest(startIndex, count, startIdx, endIdx)` - takes index parameters
- `RecordQuery.QueryNewest(endIndex, count, startIdx, endIdx)` - takes index parameters
- `RecordQuery.QueryAt(position, direction, count, startIdx, endIdx)` - startIdx/endIdx are used for state decisions

### Performance
- State decisions are now plain integer comparisons; no offsets need to be computed
- Fewer unnecessary index-to-offset conversions
- Offsets are only resolved when file I/O actually happens

### Test updates
- All test cases updated to the new Index API
- Examples updated (topic_processor_example.go, webapp/main.go)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
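A minimal sketch of the new index-based state check, using only constructors and methods that appear in the diff below (both index getters are assumed to return the same integer index type, as their use in `webapp/main.go` suggests):

```go
package main

import (
    "fmt"
    "log"
    "log/slog"

    "code.tczkiot.com/seqlog"
)

func main() {
    // Same constructor as in example/webapp/main.go below, with a no-op handler.
    seq := seqlog.NewSeqlog("logs", slog.Default(), func(topic string, rec *seqlog.Record) error {
        return nil
    })
    if err := seq.Start(); err != nil {
        log.Fatal(err)
    }
    defer seq.Stop()

    if _, err := seq.Write("app", []byte("hello")); err != nil {
        log.Fatal(err)
    }

    // State is now a plain integer comparison between two logical indexes;
    // byte offsets are only resolved when file I/O actually happens.
    processingIdx := seq.GetProcessingIndex("app") // index currently being processed
    readIdx := seq.GetReadIndex("app")             // index of the newest written record
    if processingIdx < readIdx {
        fmt.Println("topic app still has unprocessed records")
    } else {
        fmt.Println("topic app is fully processed")
    }
}
```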
example/index_example.go (new file, 149 lines)
@@ -0,0 +1,149 @@
package main

import (
    "fmt"
    "log"

    "code.tczkiot.com/seqlog"
)

func main() {
    logPath := "test_seqlog/app.log"

    // ===== Example 1: writer with an index =====
    fmt.Println("=== Example 1: indexed writer ===")

    // Create the index.
    index, err := seqlog.NewRecordIndex(logPath)
    if err != nil {
        log.Fatal(err)
    }
    defer index.Close()

    // Create a writer that shares the index.
    writer, err := seqlog.NewLogWriter(logPath, index)
    if err != nil {
        log.Fatal(err)
    }

    // The index is updated automatically as records are appended.
    for i := 1; i <= 10; i++ {
        data := fmt.Sprintf("log record #%d", i)
        offset, err := writer.Append([]byte(data))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("wrote: offset=%d, data=%s\n", offset, data)
    }

    writer.Close()
    fmt.Printf("index file created: %s.idx\n\n", logPath)

    // ===== Example 2: fast queries through the index =====
    fmt.Println("=== Example 2: indexed query ===")

    // Load the index (created by the writer above).
    index2, err := seqlog.NewRecordIndex(logPath)
    if err != nil {
        log.Fatal(err)
    }
    defer index2.Close()

    // Create a query that uses the external index.
    query, err := seqlog.NewRecordQuery(logPath, index2)
    if err != nil {
        log.Fatal(err)
    }
    defer query.Close()

    // The record count is read straight from the index, O(1).
    count, err := query.GetRecordCount()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("total records: %d\n", count)

    // The shared index can also resolve offsets directly.
    offset, err := index.GetOffset(5)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("offset of record 5: %d\n", offset)

    // Query backwards (index-assisted, efficient).
    backward, err := query.QueryAt(offset, -1, 3, 0, offset)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("query 3 records backwards:\n")
    for i, rws := range backward {
        fmt.Printf("  [%d] status=%s, data=%s\n", i, rws.Status, string(rws.Record.Data))
    }

    // Query forwards (sequential read).
    forward, err := query.QueryAt(offset, 1, 3, 0, offset)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("query 3 records forwards:\n")
    for i, rws := range forward {
        fmt.Printf("  [%d] status=%s, data=%s\n", i, rws.Status, string(rws.Record.Data))
    }

    fmt.Println()

    // ===== Example 3: automatic index recovery and rebuild =====
    fmt.Println("=== Example 3: index recovery ===")

    // An existing index file is loaded automatically;
    // a missing or corrupted one is rebuilt automatically.
    index3, err := seqlog.NewRecordIndex(logPath)
    if err != nil {
        log.Fatal(err)
    }

    fmt.Printf("index loaded: %d records\n", index3.Count())
    fmt.Printf("offset of the last record: %d\n", index3.LastOffset())

    // Binary search: find the index position for a given offset.
    idx := index3.FindIndex(offset)
    fmt.Printf("offset %d maps to index position %d\n\n", offset, idx)
    index3.Close()

    // ===== Example 4: appending (the index updates automatically) =====
    fmt.Println("=== Example 4: appending ===")

    // Reopen the index and the writer, then append more data.
    index4, err := seqlog.NewRecordIndex(logPath)
    if err != nil {
        log.Fatal(err)
    }
    defer index4.Close()

    writer, err = seqlog.NewLogWriter(logPath, index4)
    if err != nil {
        log.Fatal(err)
    }

    for i := 11; i <= 15; i++ {
        data := fmt.Sprintf("appended record #%d", i)
        offset, err := writer.Append([]byte(data))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("appended: offset=%d, data=%s\n", offset, data)
    }

    writer.Close()

    // Verify that the index was updated.
    index5, err := seqlog.NewRecordIndex(logPath)
    if err != nil {
        log.Fatal(err)
    }
    defer index5.Close()

    fmt.Printf("index updated: now %d records\n", index5.Count())

    fmt.Println("\n=== All examples done ===")
}
example/topic_processor_example.go (new file, 116 lines)
@@ -0,0 +1,116 @@
package main

import (
    "fmt"
    "log"
    "log/slog"

    "code.tczkiot.com/seqlog"
)

func main() {
    // ===== Using TopicProcessor as an aggregator =====
    fmt.Print("=== TopicProcessor aggregator example ===\n\n")

    // Create a TopicProcessor with a no-op handler.
    logger := slog.Default()
    tp, err := seqlog.NewTopicProcessor("test_seqlog", "app", logger, &seqlog.TopicConfig{
        Handler: func(rec *seqlog.Record) error {
            return nil // nothing to process in this example
        },
    })
    if err != nil {
        log.Fatalf("failed to create TopicProcessor: %v", err)
    }

    // ===== 1. Write data =====
    fmt.Println("1. Writing data:")
    for i := 1; i <= 5; i++ {
        data := fmt.Sprintf("message #%d", i)
        offset, err := tp.Write([]byte(data))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("   wrote: offset=%d, data=%s\n", offset, data)
    }
    fmt.Println()

    // ===== 2. Record count =====
    fmt.Println("2. Record count:")
    count := tp.GetRecordCount()
    fmt.Printf("   %d records in total\n\n", count)

    // ===== 3. The index =====
    fmt.Println("3. Using the index:")
    index := tp.Index()
    fmt.Printf("   records in index: %d\n", index.Count())
    fmt.Printf("   last offset: %d\n\n", index.LastOffset())

    // ===== 4. Queries =====
    fmt.Println("4. Querying records:")

    // The 3 oldest records, starting from index 0.
    oldest, err := tp.QueryOldest(0, 3)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("   oldest 3 records:")
    for i, rws := range oldest {
        fmt.Printf("     [%d] status=%s, data=%s\n", i, rws.Status, string(rws.Record.Data))
    }

    // The 2 newest records, starting from the last one.
    totalCount := tp.GetRecordCount()
    newest, err := tp.QueryNewest(totalCount-1, 2)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("   newest 2 records:")
    for i, rws := range newest {
        fmt.Printf("     [%d] status=%s, data=%s\n", i, rws.Status, string(rws.Record.Data))
    }
    fmt.Println()

    // ===== 5. Reading through a cursor =====
    fmt.Println("5. Reading with a cursor:")
    cursor, err := tp.Cursor()
    if err != nil {
        log.Fatal(err)
    }
    defer cursor.Close()

    // Read 3 records.
    records, err := cursor.NextRange(3)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("   read %d records:\n", len(records))
    for i, rec := range records {
        fmt.Printf("     [%d] %s\n", i, string(rec.Data))
    }

    // Commit the cursor position.
    cursor.Commit()
    fmt.Printf("   cursor position: start=%d, end=%d\n\n", cursor.StartIndex(), cursor.EndIndex())

    // ===== 6. More writes =====
    fmt.Println("6. Writing more data:")
    for i := 6; i <= 8; i++ {
        data := fmt.Sprintf("message #%d", i)
        offset, _ := tp.Write([]byte(data))
        fmt.Printf("   wrote: offset=%d, data=%s\n", offset, data)
    }
    fmt.Println()

    // ===== 7. Record count again =====
    fmt.Println("7. Record count after the writes:")
    count = tp.GetRecordCount()
    fmt.Printf("   %d records in total\n\n", count)

    // ===== 8. Statistics =====
    fmt.Println("8. Statistics:")
    stats := tp.GetStats()
    fmt.Printf("   written: %d records, %d bytes\n", stats.WriteCount, stats.WriteBytes)

    fmt.Println("\n=== All examples done ===")
}
example/webapp/README.md (new file, 65 lines)
@@ -0,0 +1,65 @@
# Seqlog web demo

A small web application that shows Seqlog in a realistic usage scenario.

## Features

### Simulated backend workload
- Generates business logs automatically every 2 seconds
- Random topics (app, api, database, cache)
- Random operations (query, insert, update, delete, backup, restore, sync, ...)
- **Random log sizes** (2 KB to 10 MB), bucketed as sketched below:
  - 80% small logs (2 KB - 100 KB)
  - 15% medium logs (100 KB - 1 MB)
  - 5% large logs (1 MB - 10 MB)
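
The bucket is chosen with a single random draw; a condensed, self-contained sketch of the selection logic used by `simulateBusiness` in `main.go`:

```go
package main

import (
    "fmt"
    "math/rand"
)

func main() {
    // One draw in [0,100) picks the bucket, a second draw picks the exact size.
    prob := rand.Intn(100)
    var logSize int
    switch {
    case prob < 80: // 80%: 2 KB - 100 KB
        logSize = 2*1024 + rand.Intn(98*1024)
    case prob < 95: // 15%: 100 KB - 1 MB
        logSize = 100*1024 + rand.Intn(924*1024)
    default: // 5%: 1 MB - 10 MB
        logSize = 1024*1024 + rand.Intn(9*1024*1024)
    }
    fmt.Printf("log size: %d bytes\n", logSize)
}
```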

### Web query UI
- Lists all topics
- Per-topic statistics (actual byte counts)
- Log queries with backward/forward paging
- Automatic refresh
- Status label on every entry (processed / processing / pending)

## Quick start

```bash
cd example/webapp
go run main.go
```

Open: http://localhost:8080

## Usage

1. **Pick a topic**: click an entry in the topic list on the left
2. **Statistics**: the sidebar shows that topic's statistics, including total bytes
3. **Logs**: the right-hand panel shows the log entries with status labels
4. **Refresh**: click the "Refresh logs" button or wait for the automatic refresh
5. **Paging**: use the "Page back" and "Page forward" buttons
6. **Custom range**: change the numbers in the range controls to adjust how many entries are queried

## UI legend

- **Green border**: processed logs
- **Yellow border**: logs currently being processed
- **Grey border**: pending logs

## Performance notes

Because log sizes span a wide range (2 KB to 10 MB), you can observe that:
- small logs are processed quickly
- large logs take noticeably more storage
- the statistics reflect the real growth in bytes

## API

- `GET /api/topics` - list all topics
- `GET /api/stats?topic=<name>` - statistics for a topic
- `GET /api/query?topic=<name>&backward=10&forward=10` - query logs (see the client sketch below)
- `POST /api/write` - write a log entry manually
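
For reference, a minimal Go client for the query endpoint; the JSON field names match what `handleQuery` in `main.go` returns:

```go
package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    resp, err := http.Get("http://localhost:8080/api/query?topic=app&backward=10&forward=10")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // Response shape produced by handleQuery in main.go.
    var out struct {
        Records []struct {
            Status string `json:"status"`
            Data   string `json:"data"`
        } `json:"records"`
        Total int `json:"total"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        panic(err)
    }
    for _, r := range out.Records {
        preview := r.Data
        if len(preview) > 60 {
            preview = preview[:60] + "..."
        }
        fmt.Printf("[%s] %s\n", r.Status, preview)
    }
}
```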

## Stack

- Backend: Go + Seqlog
- Frontend: plain HTML/CSS/JavaScript
- No extra dependencies
example/webapp/main.go (new file, 634 lines)
@@ -0,0 +1,634 @@
package main

import (
    "encoding/json"
    "fmt"
    "log/slog"
    "math/rand"
    "net/http"
    "os"
    "strconv"
    "sync"
    "time"

    "code.tczkiot.com/seqlog"
)

var (
    seq          *seqlog.Seqlog
    logger       *slog.Logger
    queryCache   = make(map[string]*seqlog.RecordQuery)
    queryCacheMu sync.RWMutex
)

func main() {
    // Initialise logging.
    logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
        Level: slog.LevelInfo,
    }))

    // Create the Seqlog instance.
    seq = seqlog.NewSeqlog("logs", logger, func(topic string, rec *seqlog.Record) error {
        // Minimal processing: just log a summary of each record.
        dataPreview := string(rec.Data)
        if len(dataPreview) > 100 {
            dataPreview = dataPreview[:100] + "..."
        }
        logger.Info("processing record", "topic", topic, "size", len(rec.Data), "preview", dataPreview)
        return nil
    })

    if err := seq.Start(); err != nil {
        logger.Error("failed to start", "error", err)
        os.Exit(1)
    }
    defer seq.Stop()

    logger.Info("Seqlog started")

    // Start the background business simulation.
    go simulateBusiness()

    // Start the web server.
    http.HandleFunc("/", handleIndex)
    http.HandleFunc("/api/topics", handleTopics)
    http.HandleFunc("/api/stats", handleStats)
    http.HandleFunc("/api/query", handleQuery)
    http.HandleFunc("/api/write", handleWrite)

    addr := ":8080"
    logger.Info("web server listening", "addr", "http://localhost"+addr)
    if err := http.ListenAndServe(addr, nil); err != nil {
        logger.Error("server error", "error", err)
    }
}

// simulateBusiness writes randomly sized business logs in the background.
func simulateBusiness() {
    topics := []string{"app", "api", "database", "cache"}
    actions := []string{"query", "insert", "update", "delete", "connect", "disconnect", "backup", "restore", "sync"}
    status := []string{"ok", "failed", "timeout", "retry"}

    ticker := time.NewTicker(2 * time.Second)
    defer ticker.Stop()

    for range ticker.C {
        // Pick a random topic and content.
        topic := topics[rand.Intn(len(topics))]
        action := actions[rand.Intn(len(actions))]
        st := status[rand.Intn(len(status))]

        // Random log size, 2KB to 10MB:
        //   80% small logs  (2KB - 100KB)
        //   15% medium logs (100KB - 1MB)
        //    5% large logs  (1MB - 10MB)
        var logSize int
        prob := rand.Intn(100)
        if prob < 80 {
            // 2KB - 100KB
            logSize = 2*1024 + rand.Intn(98*1024)
        } else if prob < 95 {
            // 100KB - 1MB
            logSize = 100*1024 + rand.Intn(924*1024)
        } else {
            // 1MB - 10MB
            logSize = 1024*1024 + rand.Intn(9*1024*1024)
        }

        // Build the log header.
        header := fmt.Sprintf("[%s] %s %s - took: %dms | payload: %s | ",
            time.Now().Format("15:04:05"),
            action,
            st,
            rand.Intn(1000),
            formatBytes(int64(logSize)))

        // Pad the record up to the chosen size.
        data := make([]byte, logSize)
        copy(data, []byte(header))

        // Fill the rest with readable fake data.
        fillOffset := len(header)
        patterns := []string{
            "user_id=%d, session=%x, ip=%d.%d.%d.%d, ",
            "query_time=%dms, rows=%d, cached=%v, ",
            "error_code=%d, retry_count=%d, ",
            "request_id=%x, trace_id=%x, ",
        }

        for fillOffset < logSize-100 {
            pattern := patterns[rand.Intn(len(patterns))]
            var chunk string
            switch pattern {
            case patterns[0]:
                chunk = fmt.Sprintf(pattern, rand.Intn(10000), rand.Intn(0xFFFFFF),
                    rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256))
            case patterns[1]:
                chunk = fmt.Sprintf(pattern, rand.Intn(1000), rand.Intn(10000), rand.Intn(2) == 1)
            case patterns[2]:
                chunk = fmt.Sprintf(pattern, rand.Intn(500), rand.Intn(5))
            case patterns[3]:
                chunk = fmt.Sprintf(pattern, rand.Intn(0xFFFFFFFF), rand.Intn(0xFFFFFFFF))
            }

            remaining := logSize - fillOffset
            if len(chunk) > remaining {
                chunk = chunk[:remaining]
            }
            copy(data[fillOffset:], []byte(chunk))
            fillOffset += len(chunk)
        }

        // Write the record.
        if _, err := seq.Write(topic, data); err != nil {
            logger.Error("failed to write log", "error", err, "size", logSize)
        } else {
            logger.Info("wrote log", "topic", topic, "size", formatBytes(int64(logSize)))
        }
    }
}

func formatBytes(bytes int64) string {
    if bytes < 1024 {
        return fmt.Sprintf("%d B", bytes)
    }
    if bytes < 1024*1024 {
        return fmt.Sprintf("%.1f KB", float64(bytes)/1024)
    }
    return fmt.Sprintf("%.2f MB", float64(bytes)/1024/1024)
}

// Index page.
func handleIndex(w http.ResponseWriter, r *http.Request) {
    html := `<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <title>Seqlog Log Viewer</title>
    <style>
        body {
            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
            max-width: 1400px;
            margin: 0 auto;
            padding: 20px;
            background: #f5f5f5;
        }
        .header {
            background: white;
            padding: 20px;
            border-radius: 8px;
            margin-bottom: 20px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        h1 {
            margin: 0;
            color: #333;
        }
        .subtitle {
            color: #666;
            margin-top: 5px;
        }
        .container {
            display: grid;
            grid-template-columns: 250px 1fr;
            gap: 20px;
        }
        .sidebar {
            background: white;
            padding: 20px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        .main {
            background: white;
            padding: 20px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        .topic-list {
            list-style: none;
            padding: 0;
            margin: 0;
        }
        .topic-item {
            padding: 10px;
            margin-bottom: 5px;
            cursor: pointer;
            border-radius: 4px;
            transition: background 0.2s;
        }
        .topic-item:hover {
            background: #f0f0f0;
        }
        .topic-item.active {
            background: #007bff;
            color: white;
        }
        .stats {
            margin-top: 20px;
            padding: 15px;
            background: #f8f9fa;
            border-radius: 4px;
        }
        .stat-item {
            display: flex;
            justify-content: space-between;
            margin-bottom: 8px;
            font-size: 14px;
        }
        .controls {
            margin-bottom: 20px;
        }
        .btn {
            padding: 8px 16px;
            margin-right: 10px;
            border: none;
            border-radius: 4px;
            cursor: pointer;
            font-size: 14px;
        }
        .btn-primary {
            background: #007bff;
            color: white;
        }
        .btn-secondary {
            background: #6c757d;
            color: white;
        }
        .log-container {
            height: 500px;
            overflow-y: auto;
            border: 1px solid #ddd;
            border-radius: 4px;
            padding: 10px;
            background: #f8f9fa;
            font-family: 'Courier New', monospace;
            font-size: 13px;
        }
        .log-entry {
            padding: 6px 10px;
            margin-bottom: 4px;
            background: white;
            border-left: 3px solid #007bff;
            border-radius: 2px;
            word-wrap: break-word;
            overflow-wrap: break-word;
            display: -webkit-box;
            -webkit-line-clamp: 3;
            -webkit-box-orient: vertical;
            overflow: hidden;
            line-height: 1.5;
        }
        .log-entry.processed {
            border-left-color: #28a745;
            opacity: 0.8;
        }
        .log-entry.processing {
            border-left-color: #ffc107;
            background: #fff9e6;
        }
        .log-entry.pending {
            border-left-color: #6c757d;
            opacity: 0.6;
        }
        .status-badge {
            display: inline-block;
            padding: 2px 8px;
            border-radius: 3px;
            font-size: 11px;
            margin-right: 8px;
        }
        .status-processed {
            background: #d4edda;
            color: #155724;
        }
        .status-processing {
            background: #fff3cd;
            color: #856404;
        }
        .status-pending {
            background: #e2e3e5;
            color: #383d41;
        }
        .loading {
            text-align: center;
            padding: 20px;
            color: #666;
        }
    </style>
</head>
<body>
    <div class="header">
        <h1>Seqlog Log Query System</h1>
        <div class="subtitle">View and manage application logs in real time</div>
    </div>

    <div class="container">
        <div class="sidebar">
            <h3>Topics</h3>
            <ul class="topic-list" id="topicList"></ul>

            <div class="stats" id="stats">
                <h4>Statistics</h4>
                <div id="statsContent">Select a topic to see its statistics</div>
            </div>
        </div>

        <div class="main">
            <div class="controls">
                <button class="btn btn-primary" onclick="loadLogs()">Refresh logs</button>
                <button class="btn btn-secondary" onclick="queryBackward()">Page back</button>
                <button class="btn btn-secondary" onclick="queryForward()">Page forward</button>
                <span style="margin-left: 20px;">Range: back <input type="number" id="backwardCount" value="10" style="width: 60px;"> entries, forward <input type="number" id="forwardCount" value="10" style="width: 60px;"> entries</span>
            </div>

            <div class="log-container" id="logContainer">
                <div class="loading">Select a topic to view its logs</div>
            </div>
        </div>
    </div>

    <script>
        let currentTopic = null;
        let displayedKeys = new Set(); // keys of log entries already rendered

        // Load topics.
        async function loadTopics() {
            const response = await fetch('/api/topics');
            const topics = await response.json();

            const list = document.getElementById('topicList');
            list.innerHTML = topics.map(topic =>
                '<li class="topic-item" onclick="selectTopic(\'' + topic + '\')">' + topic + '</li>'
            ).join('');
        }

        // Select a topic.
        function selectTopic(topic) {
            currentTopic = topic;
            displayedKeys.clear(); // reset the displayed entries when switching topics

            // Update the highlighted entry.
            document.querySelectorAll('.topic-item').forEach(item => {
                item.classList.remove('active');
                if (item.textContent === topic) {
                    item.classList.add('active');
                }
            });

            // Clear the container and reload.
            document.getElementById('logContainer').innerHTML = '';
            loadStats(topic);
            loadLogs();
        }

        // Load statistics.
        async function loadStats(topic) {
            const response = await fetch('/api/stats?topic=' + topic);
            const stats = await response.json();

            const content = document.getElementById('statsContent');
            content.innerHTML =
                '<div class="stat-item"><span>Written:</span><span>' + stats.write_count + '</span></div>' +
                '<div class="stat-item"><span>Processed:</span><span>' + stats.processed_count + '</span></div>' +
                '<div class="stat-item"><span>Errors:</span><span>' + stats.error_count + '</span></div>' +
                '<div class="stat-item"><span>Size:</span><span>' + formatBytes(stats.write_bytes) + '</span></div>';
        }

        // Load logs.
        async function loadLogs() {
            if (!currentTopic) return;

            const backward = document.getElementById('backwardCount').value;
            const forward = document.getElementById('forwardCount').value;

            const response = await fetch('/api/query?topic=' + currentTopic +
                '&backward=' + backward + '&forward=' + forward);
            const data = await response.json();

            const container = document.getElementById('logContainer');

            if (data.records.length === 0 && displayedKeys.size === 0) {
                container.innerHTML = '<div class="loading">No logs yet</div>';
                return;
            }

            // The /api/query response carries no offset field, so the record
            // content itself is used as the de-duplication key (good enough for a demo).
            const newRecords = data.records.filter(r => !displayedKeys.has(r.data));

            if (newRecords.length > 0) {
                // Build HTML for the new records.
                const newHTML = newRecords.map(r => {
                    displayedKeys.add(r.data); // mark as displayed

                    // Map the status to a CSS class and label.
                    let statusClass = 'pending';
                    let statusText = 'Pending';
                    let badgeClass = 'status-pending';

                    if (r.status === 'StatusProcessed' || r.status === 'processed') {
                        statusClass = 'processed';
                        statusText = 'Processed';
                        badgeClass = 'status-processed';
                    } else if (r.status === 'StatusProcessing' || r.status === 'processing') {
                        statusClass = 'processing';
                        statusText = 'Processing';
                        badgeClass = 'status-processing';
                    }

                    return '<div class="log-entry ' + statusClass + '">' +
                        '<span class="status-badge ' + badgeClass + '">' + statusText + '</span>' +
                        r.data +
                        '</div>';
                }).join('');

                // Append the new records.
                container.innerHTML += newHTML;

                // Scroll to the bottom.
                container.scrollTop = container.scrollHeight;
            }
        }

        // The paging buttons currently just re-run the query with the configured range.
        function queryBackward() {
            loadLogs();
        }

        function queryForward() {
            loadLogs();
        }

        function formatBytes(bytes) {
            if (bytes < 1024) return bytes + ' B';
            if (bytes < 1024 * 1024) return (bytes / 1024).toFixed(1) + ' KB';
            return (bytes / 1024 / 1024).toFixed(1) + ' MB';
        }

        // Initialisation: load the topic list once; only stats and logs refresh automatically.
        loadTopics();
        setInterval(() => {
            if (currentTopic) {
                loadStats(currentTopic);
                loadLogs();
            }
        }, 3000); // refresh logs every 3 seconds
    </script>
</body>
</html>`

    w.Header().Set("Content-Type", "text/html; charset=utf-8")
    fmt.Fprint(w, html)
}

// API: list all topics.
func handleTopics(w http.ResponseWriter, r *http.Request) {
    topics := seq.GetTopics()
    json.NewEncoder(w).Encode(topics)
}

// API: statistics for a topic.
func handleStats(w http.ResponseWriter, r *http.Request) {
    topic := r.URL.Query().Get("topic")
    if topic == "" {
        http.Error(w, "missing topic parameter", http.StatusBadRequest)
        return
    }

    stats, err := seq.GetTopicStats(topic)
    if err != nil {
        http.Error(w, err.Error(), http.StatusNotFound)
        return
    }

    json.NewEncoder(w).Encode(stats)
}

// API: query logs.
func handleQuery(w http.ResponseWriter, r *http.Request) {
    topic := r.URL.Query().Get("topic")
    if topic == "" {
        http.Error(w, "missing topic parameter", http.StatusBadRequest)
        return
    }

    backward, _ := strconv.Atoi(r.URL.Query().Get("backward"))
    forward, _ := strconv.Atoi(r.URL.Query().Get("forward"))

    if backward == 0 {
        backward = 10
    }
    if forward == 0 {
        forward = 10
    }

    // Get or create the query object for this topic from the cache.
    queryCacheMu.Lock()
    query, exists := queryCache[topic]
    if !exists {
        var err error
        query, err = seq.NewTopicQuery(topic)
        if err != nil {
            queryCacheMu.Unlock()
            http.Error(w, err.Error(), http.StatusNotFound)
            return
        }
        queryCache[topic] = query
    }
    queryCacheMu.Unlock()

    // Current processing index and read index.
    startIdx := seq.GetProcessingIndex(topic)
    endIdx := seq.GetReadIndex(topic)

    // The record index is needed to convert indexes into offsets.
    processor, err := seq.GetProcessor(topic)
    if err != nil {
        http.Error(w, err.Error(), http.StatusNotFound)
        return
    }
    index := processor.Index()

    // Combine results: backward + current + forward.
    var results []*seqlog.RecordWithStatus

    // Backward query.
    if backward > 0 && startIdx > 0 {
        startPos, err := index.GetOffset(startIdx)
        if err == nil {
            backResults, err := query.QueryAt(startPos, -1, backward, startIdx, endIdx)
            if err == nil {
                results = append(results, backResults...)
            }
        }
    }

    // Current position.
    if startIdx < endIdx {
        startPos, err := index.GetOffset(startIdx)
        if err == nil {
            currentResults, err := query.QueryAt(startPos, 0, 1, startIdx, endIdx)
            if err == nil {
                results = append(results, currentResults...)
            }
        }
    }

    // Forward query.
    if forward > 0 {
        startPos, err := index.GetOffset(startIdx)
        if err == nil {
            forwardResults, err := query.QueryAt(startPos, 1, forward, startIdx, endIdx)
            if err == nil {
                results = append(results, forwardResults...)
            }
        }
    }

    type Record struct {
        Status string `json:"status"`
        Data   string `json:"data"`
    }

    records := make([]Record, len(results))
    for i, r := range results {
        records[i] = Record{
            Status: r.Status.String(),
            Data:   string(r.Record.Data),
        }
    }

    json.NewEncoder(w).Encode(map[string]interface{}{
        "records": records,
        "total":   len(records),
    })
}

// API: write a log entry manually.
func handleWrite(w http.ResponseWriter, r *http.Request) {
    if r.Method != "POST" {
        http.Error(w, "only POST is supported", http.StatusMethodNotAllowed)
        return
    }

    var req struct {
        Topic string `json:"topic"`
        Data  string `json:"data"`
    }

    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }

    offset, err := seq.Write(req.Topic, []byte(req.Data))
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    json.NewEncoder(w).Encode(map[string]interface{}{
        "success": true,
        "offset":  offset,
    })
}