- Switch the Redis connection to connection-pool mode to improve connection reuse
- Fix incorrect cache comments and label the database-cache logic consistently
- Add non-empty validation of data-retrieval results to avoid nil-pointer errors
- Guard database operations with a read-write lock to ensure concurrency safety
- Implement a retry mechanism for database query and exec operations to improve stability (see the sketch below)
- Update the cache and database settings in the configuration file to optimize the caching strategy
- Rework the README, adding framework features and performance test data
- Add an example route configuration to round out the quick-start guide
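The locking, retry, and non-empty-validation items above refer to HoTime's database layer, which is not part of the benchmark file below. As a rough sketch only, the following standalone Go snippet shows one way a retry-plus-read-write-lock wrapper around database calls could look; the names (store, withRetry, Query, Exec) and the retry parameters are invented for illustration and are not HoTime's actual API.

// Rough illustration only: a hypothetical retry wrapper with read-write
// locking around database calls. All names here are invented for this
// sketch and do not reflect HoTime's real implementation.
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

type store struct {
	mu sync.RWMutex // protects concurrent reads and writes
}

// withRetry runs fn up to attempts times, sleeping between failed attempts.
func withRetry(attempts int, backoff time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		if i < attempts-1 {
			time.Sleep(backoff)
		}
	}
	return err
}

// Query takes the read lock, retries the underlying call, and rejects empty results.
func (s *store) Query(queryFn func() ([]map[string]interface{}, error)) ([]map[string]interface{}, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	var rows []map[string]interface{}
	err := withRetry(3, 100*time.Millisecond, func() error {
		var e error
		rows, e = queryFn()
		return e
	})
	if err != nil {
		return nil, err
	}
	if len(rows) == 0 {
		// Non-empty validation: callers get an explicit error instead of a nil result.
		return nil, errors.New("empty result")
	}
	return rows, nil
}

// Exec takes the write lock and retries the underlying call on failure.
func (s *store) Exec(execFn func() error) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return withRetry(3, 100*time.Millisecond, execFn)
}

func main() {
	s := &store{}
	rows, err := s.Query(func() ([]map[string]interface{}, error) {
		return []map[string]interface{}{{"id": 1}}, nil // stand-in for a real DB query
	})
	fmt.Println(rows, err)
}

A real implementation would also distinguish retryable errors (timeouts, dropped connections) from permanent ones instead of retrying unconditionally.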
415 lines
10 KiB
Go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

// Benchmark configuration
type BenchConfig struct {
	URL         string
	Concurrency int
	Duration    time.Duration
	Timeout     time.Duration
}

// Benchmark result
type BenchResult struct {
	TotalRequests   int64
	SuccessRequests int64
	FailedRequests  int64
	TotalDuration   time.Duration
	MinLatency      int64 // nanoseconds
	MaxLatency      int64
	AvgLatency      int64
	P50Latency      int64 // 50th percentile
	P90Latency      int64 // 90th percentile
	P99Latency      int64 // 99th percentile
	QPS             float64
}

// Latency collector
type LatencyCollector struct {
	latencies []int64
	mu        sync.Mutex
}

func (lc *LatencyCollector) Add(latency int64) {
	lc.mu.Lock()
	lc.latencies = append(lc.latencies, latency)
	lc.mu.Unlock()
}

func (lc *LatencyCollector) GetPercentile(p float64) int64 {
	lc.mu.Lock()
	defer lc.mu.Unlock()

	if len(lc.latencies) == 0 {
		return 0
	}

	// Take the percentile by rank
	n := len(lc.latencies)
	idx := int(float64(n) * p)
	if idx >= n {
		idx = n - 1
	}

	// Partial sort to find the idx-th element
	return quickSelect(lc.latencies, idx)
}

func quickSelect(arr []int64, k int) int64 {
	if len(arr) == 1 {
		return arr[0]
	}

	pivot := arr[len(arr)/2]
	var left, right, equal []int64

	for _, v := range arr {
		if v < pivot {
			left = append(left, v)
		} else if v > pivot {
			right = append(right, v)
		} else {
			equal = append(equal, v)
		}
	}

	if k < len(left) {
		return quickSelect(left, k)
	} else if k < len(left)+len(equal) {
		return pivot
	}
	return quickSelect(right, k-len(left)-len(equal))
}

func main() {
	// Use all CPU cores
	runtime.GOMAXPROCS(runtime.NumCPU())

	fmt.Println("==========================================")
	fmt.Println("    🔥 HoTime Extreme Stress Test 🔥")
	fmt.Println("==========================================")
	fmt.Printf("CPU cores: %d\n", runtime.NumCPU())
	fmt.Println()

	// Extreme test configurations
	configs := []BenchConfig{
		{URL: "http://127.0.0.1:8081/app/test/hello", Concurrency: 500, Duration: 15 * time.Second, Timeout: 10 * time.Second},
		{URL: "http://127.0.0.1:8081/app/test/hello", Concurrency: 1000, Duration: 15 * time.Second, Timeout: 10 * time.Second},
		{URL: "http://127.0.0.1:8081/app/test/hello", Concurrency: 2000, Duration: 15 * time.Second, Timeout: 10 * time.Second},
		{URL: "http://127.0.0.1:8081/app/test/hello", Concurrency: 5000, Duration: 15 * time.Second, Timeout: 10 * time.Second},
		{URL: "http://127.0.0.1:8081/app/test/hello", Concurrency: 10000, Duration: 15 * time.Second, Timeout: 10 * time.Second},
	}

	// Check that the service is up
	fmt.Println("Checking whether the service is available...")
	if !checkService(configs[0].URL) {
		fmt.Println("❌ Service unavailable; start the example application first")
		return
	}
	fmt.Println("✅ Service is ready")
	fmt.Println()

	// Warm-up
	fmt.Println("🔄 Warming up (5s)...")
	warmup(configs[0].URL, 100, 5*time.Second)
	fmt.Println("✅ Warm-up complete")
	fmt.Println()

	var maxQPS float64
	var maxConcurrency int

	// Run the extreme tests
	for i, config := range configs {
		fmt.Printf("══════════════════════════════════════════\n")
		fmt.Printf("[Extreme test %d] concurrency: %d, duration: %v\n", i+1, config.Concurrency, config.Duration)
		fmt.Printf("══════════════════════════════════════════\n")

		result := runBenchmark(config)
		printResult(result)

		if result.QPS > maxQPS {
			maxQPS = result.QPS
			maxConcurrency = config.Concurrency
		}

		// Check whether the service has hit its limit
		successRate := float64(result.SuccessRequests) / float64(result.TotalRequests) * 100
		if successRate < 95 {
			fmt.Println("\n⚠️ Success rate below 95%; service limit reached!")
			break
		}

		if result.AvgLatency > int64(100*time.Millisecond) {
			fmt.Println("\n⚠️ Average latency above 100ms; service limit reached!")
			break
		}

		fmt.Println()

		// Pause between tests
		if i < len(configs)-1 {
			fmt.Println("Cooling down for 5 seconds...")
			time.Sleep(5 * time.Second)
			fmt.Println()
		}
	}

	// Final report
	fmt.Println()
	fmt.Println("══════════════════════════════════════════")
	fmt.Println("    📊 Extreme Test Summary")
	fmt.Println("══════════════════════════════════════════")
	fmt.Printf("Peak QPS: %.2f req/s\n", maxQPS)
	fmt.Printf("Best concurrency: %d\n", maxConcurrency)
	fmt.Println()

	// Estimate concurrent users
	estimateUsers(maxQPS)
}

func checkService(url string) bool {
	client := &http.Client{Timeout: 3 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == 200
}

func warmup(url string, concurrency int, duration time.Duration) {
	client := &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			MaxIdleConns:        concurrency * 2,
			MaxIdleConnsPerHost: concurrency * 2,
		},
	}

	done := make(chan struct{})
	var wg sync.WaitGroup

	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-done:
					return
				default:
					resp, err := client.Get(url)
					if err == nil {
						io.Copy(io.Discard, resp.Body)
						resp.Body.Close()
					}
				}
			}
		}()
	}

	time.Sleep(duration)
	close(done)
	wg.Wait()
}

func runBenchmark(config BenchConfig) BenchResult {
	var (
		totalRequests   int64
		successRequests int64
		failedRequests  int64
		totalLatency    int64
		minLatency      int64 = int64(time.Hour)
		maxLatency      int64
		mu              sync.Mutex
	)

	collector := &LatencyCollector{
		latencies: make([]int64, 0, 100000),
	}

	// High-throughput HTTP client
	client := &http.Client{
		Timeout: config.Timeout,
		Transport: &http.Transport{
			MaxIdleConns:        config.Concurrency * 2,
			MaxIdleConnsPerHost: config.Concurrency * 2,
			MaxConnsPerHost:     config.Concurrency * 2,
			IdleConnTimeout:     90 * time.Second,
			DisableKeepAlives:   false,
			DisableCompression:  true,
		},
	}

	done := make(chan struct{})
	var wg sync.WaitGroup

	startTime := time.Now()

	// Launch the worker goroutines
	for i := 0; i < config.Concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			for {
				select {
				case <-done:
					return
				default:
					reqStart := time.Now()
					success := makeRequest(client, config.URL)
					latency := time.Since(reqStart).Nanoseconds()

					atomic.AddInt64(&totalRequests, 1)
					atomic.AddInt64(&totalLatency, latency)

					if success {
						atomic.AddInt64(&successRequests, 1)
					} else {
						atomic.AddInt64(&failedRequests, 1)
					}

					// Sample latencies (1 in every 100 requests) to limit memory usage
					if atomic.LoadInt64(&totalRequests)%100 == 0 {
						collector.Add(latency)
					}

					mu.Lock()
					if latency < minLatency {
						minLatency = latency
					}
					if latency > maxLatency {
						maxLatency = latency
					}
					mu.Unlock()
				}
			}
		}()
	}

	time.Sleep(config.Duration)
	close(done)
	wg.Wait()

	totalDuration := time.Since(startTime)

	result := BenchResult{
		TotalRequests:   totalRequests,
		SuccessRequests: successRequests,
		FailedRequests:  failedRequests,
		TotalDuration:   totalDuration,
		MinLatency:      minLatency,
		MaxLatency:      maxLatency,
		P50Latency:      collector.GetPercentile(0.50),
		P90Latency:      collector.GetPercentile(0.90),
		P99Latency:      collector.GetPercentile(0.99),
	}

	if totalRequests > 0 {
		result.AvgLatency = totalLatency / totalRequests
		result.QPS = float64(totalRequests) / totalDuration.Seconds()
	}

	return result
}

func makeRequest(client *http.Client, url string) bool {
	resp, err := client.Get(url)
	if err != nil {
		return false
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return false
	}

	if resp.StatusCode != 200 {
		return false
	}

	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		return false
	}

	if status, ok := result["status"].(float64); !ok || status != 0 {
		return false
	}

	return true
}

func printResult(result BenchResult) {
	successRate := float64(result.SuccessRequests) / float64(result.TotalRequests) * 100

	fmt.Printf("Total requests: %d\n", result.TotalRequests)
	fmt.Printf("Successful requests: %d\n", result.SuccessRequests)
	fmt.Printf("Failed requests: %d\n", result.FailedRequests)
	fmt.Printf("Success rate: %.2f%%\n", successRate)
	fmt.Printf("Total time: %v\n", result.TotalDuration.Round(time.Millisecond))
	fmt.Printf("QPS: %.2f req/s\n", result.QPS)
	fmt.Println("------------------------------------------")
	fmt.Printf("Min latency: %v\n", time.Duration(result.MinLatency).Round(time.Microsecond))
	fmt.Printf("Avg latency: %v\n", time.Duration(result.AvgLatency).Round(time.Microsecond))
	fmt.Printf("P50 latency: %v\n", time.Duration(result.P50Latency).Round(time.Microsecond))
	fmt.Printf("P90 latency: %v\n", time.Duration(result.P90Latency).Round(time.Microsecond))
	fmt.Printf("P99 latency: %v\n", time.Duration(result.P99Latency).Round(time.Microsecond))
	fmt.Printf("Max latency: %v\n", time.Duration(result.MaxLatency).Round(time.Microsecond))

	// Performance rating
	fmt.Print("\nPerformance rating: ")
	switch {
	case result.QPS >= 200000:
		fmt.Println("🏆 Outstanding (QPS >= 200K)")
	case result.QPS >= 100000:
		fmt.Println("🚀 Excellent (QPS >= 100K)")
	case result.QPS >= 50000:
		fmt.Println("⭐ Good (QPS >= 50K)")
	case result.QPS >= 20000:
		fmt.Println("👍 Above average (QPS >= 20K)")
	case result.QPS >= 10000:
		fmt.Println("📊 Average (QPS >= 10K)")
	default:
		fmt.Println("⚠️ Fair (QPS < 10K)")
	}
}

func estimateUsers(maxQPS float64) {
	fmt.Println("📈 Estimated concurrent users (by usage scenario):")
	fmt.Println("------------------------------------------")

	scenarios := []struct {
		name            string
		requestInterval float64 // average interval between a user's requests, in seconds
	}{
		{"Heavy interaction (1 request per second)", 1},
		{"Active users (1 request every 5 seconds)", 5},
		{"Normal browsing (1 request every 10 seconds)", 10},
		{"Light usage (1 request every 30 seconds)", 30},
		{"Occasional visits (1 request every 60 seconds)", 60},
	}

	for _, s := range scenarios {
		users := maxQPS * s.requestInterval
		fmt.Printf("%-30s ~%d users\n", s.name, int(users))
	}

	fmt.Println()
	fmt.Println("💡 In production, keep 30-50% performance headroom")
	fmt.Printf("   Safe concurrent users: %d - %d (normal-browsing scenario)\n",
		int(maxQPS*10*0.5), int(maxQPS*10*0.7))
}