Implement efficient LRU eviction strategy for attribute cache

- Added tracking of last access time and access count to cache entries
- Implemented eviction policy to remove least recently/frequently used entries
- Cache now removes 10% of entries when the cache is full, prioritizing by usage
- Added benchmarks and tests to verify eviction strategy
- Fixed the previously ineffective eviction strategy
- Improved cache efficiency for applications with many diverse object types

🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
semihalev 2025-03-11 14:20:47 +03:00
commit f1add2d820
13 changed files with 334 additions and 123 deletions

View file

@ -0,0 +1,154 @@
package twig
import (
"fmt"
"reflect"
"testing"
)
// testType is a fixture exposing exported fields plus one pointer-receiver
// and one value-receiver method, so attribute-cache tests can cover both
// field lookups and method lookups on each receiver kind.
type testType struct {
	Field1 string
	Field2 int
	Field3 bool
}

// Method1 exposes Field1 through a pointer-receiver method.
func (tt *testType) Method1() string {
	return tt.Field1
}

// Method2 exposes Field2 through a value-receiver method.
func (tt testType) Method2() int {
	return tt.Field2
}
// dynamicType is used to generate many distinct attribute-cache keys and
// stress the cache with diverse lookups.
type dynamicType struct {
	name   string
	fields map[string]interface{}
}

// displayable is a small interface used to showcase reflection-based lookups.
type displayable interface {
	Display() string
}

// Display implements displayable by prefixing the type's name.
func (d dynamicType) Display() string {
	return "Type: " + d.name
}
// BenchmarkAttributeCache_FewTypes measures attribute lookups when only a
// handful of (type, attribute) pairs are in play, i.e. the cache-friendly case.
func BenchmarkAttributeCache_FewTypes(b *testing.B) {
	// Start from an empty attribute cache so runs are comparable.
	attributeCache.Lock()
	attributeCache.m = make(map[attributeCacheKey]attributeCacheEntry)
	attributeCache.currSize = 0
	attributeCache.Unlock()

	// Create a render context
	ctx := NewRenderContext(nil, nil, nil)
	defer ctx.Release()

	subject := &testType{
		Field1: "test",
		Field2: 123,
		Field3: true,
	}

	// Hit the same fields and methods on every iteration.
	attrs := [...]string{"Field1", "Field2", "Field3", "Method1", "Method2"}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, name := range attrs {
			_, _ = ctx.getAttribute(subject, name)
		}
	}
}
// BenchmarkAttributeCache_ManyTypes measures attribute lookups across far
// more distinct values than the cache limit, exercising growth and eviction.
func BenchmarkAttributeCache_ManyTypes(b *testing.B) {
	const objectCount = 2000 // more than the cache limit

	// Start from an empty attribute cache so runs are comparable.
	attributeCache.Lock()
	attributeCache.m = make(map[attributeCacheKey]attributeCacheEntry)
	attributeCache.currSize = 0
	attributeCache.Unlock()

	// Create a render context
	ctx := NewRenderContext(nil, nil, nil)
	defer ctx.Release()

	// Build many dynamicType values with distinct contents.
	objects := make([]interface{}, objectCount)
	for i := range objects {
		objects[i] = dynamicType{
			name: fmt.Sprintf("Type%d", i),
			fields: map[string]interface{}{
				"field1": fmt.Sprintf("value%d", i),
				"field2": i,
			},
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Cycle through the objects, touching two attributes each time.
		obj := objects[i%objectCount]
		_, _ = ctx.getAttribute(obj, "name")
		_, _ = ctx.getAttribute(obj, "fields")
	}
}
// TestAttributeCacheLRUEviction verifies that the attribute cache keeps the
// most recently/frequently used entries when it overflows its size limit.
func TestAttributeCacheLRUEviction(t *testing.T) {
	// Reset the attribute cache and shrink it so eviction triggers quickly.
	attributeCache.Lock()
	prevMaxSize := attributeCache.maxSize
	attributeCache.m = make(map[attributeCacheKey]attributeCacheEntry)
	attributeCache.currSize = 0
	attributeCache.maxSize = 10 // Small size for testing
	attributeCache.Unlock()

	// Restore the original limit so this test does not leak a tiny cache
	// size into other tests/benchmarks in the package. Registered before
	// the deferred RUnlock below so it runs after it (defers are LIFO).
	defer func() {
		attributeCache.Lock()
		attributeCache.maxSize = prevMaxSize
		attributeCache.Unlock()
	}()

	// Create a render context
	ctx := NewRenderContext(nil, nil, nil)
	defer ctx.Release()

	// Create 20 different values (more than the cache size)
	types := make([]interface{}, 20)
	for i := 0; i < 20; i++ {
		types[i] = dynamicType{
			name: fmt.Sprintf("Type%d", i),
			fields: map[string]interface{}{
				"field1": fmt.Sprintf("value%d", i),
			},
		}
	}

	// First access all types once
	for i := 0; i < 20; i++ {
		_, _ = ctx.getAttribute(types[i], "name")
	}

	// Now access the last 5 types more frequently
	for i := 0; i < 100; i++ {
		typeIdx := 15 + (i % 5) // Types 15-19
		_, _ = ctx.getAttribute(types[typeIdx], "name")
	}

	// Check which types are in the cache
	attributeCache.RLock()
	defer attributeCache.RUnlock()

	// The most recently/frequently used types should be in the cache
	for i := 15; i < 20; i++ {
		typeKey := attributeCacheKey{
			typ:  reflect.TypeOf(types[i]),
			attr: "name",
		}
		_, found := attributeCache.m[typeKey]
		if !found {
			t.Errorf("Expected type %d to be in cache, but it wasn't", i)
		}
	}
}

View file

@ -127,7 +127,7 @@ func main() {
}
}
directMacroTime := time.Since(startTime)
fmt.Printf(" %d iterations in %v (%.2f µs/op)\n",
fmt.Printf(" %d iterations in %v (%.2f µs/op)\n",
iterations, directMacroTime, float64(directMacroTime.Nanoseconds())/float64(iterations)/1000.0)
// Benchmark imported macro usage
@ -148,7 +148,7 @@ func main() {
}
}
importedMacroTime := time.Since(startTime)
fmt.Printf(" %d iterations in %v (%.2f µs/op)\n",
fmt.Printf(" %d iterations in %v (%.2f µs/op)\n",
iterations, importedMacroTime, float64(importedMacroTime.Nanoseconds())/float64(iterations)/1000.0)
// Benchmark nested macro calls
@ -169,7 +169,7 @@ func main() {
}
}
nestedMacroTime := time.Since(startTime)
fmt.Printf(" %d iterations in %v (%.2f µs/op)\n",
fmt.Printf(" %d iterations in %v (%.2f µs/op)\n",
iterations, nestedMacroTime, float64(nestedMacroTime.Nanoseconds())/float64(iterations)/1000.0)
// Summary
@ -179,9 +179,9 @@ func main() {
fmt.Printf("Direct macro usage: %.2f µs/op\n", float64(directMacroTime.Nanoseconds())/float64(iterations)/1000.0)
fmt.Printf("Imported macro usage: %.2f µs/op\n", float64(importedMacroTime.Nanoseconds())/float64(iterations)/1000.0)
fmt.Printf("Nested macro calls: %.2f µs/op\n", float64(nestedMacroTime.Nanoseconds())/float64(iterations)/1000.0)
fmt.Println("\nRelative Performance:")
fmt.Printf("Imported vs Direct: %.2fx\n", float64(importedMacroTime.Nanoseconds())/float64(directMacroTime.Nanoseconds()))
fmt.Printf("Nested vs Direct: %.2fx\n", float64(nestedMacroTime.Nanoseconds())/float64(directMacroTime.Nanoseconds()))
fmt.Println("==================================================")
}
}

View file

@ -299,4 +299,4 @@ func ifThenElse(condition bool, a, b string) string {
return a
}
return b
}
}

View file

@ -12,7 +12,7 @@ import (
// benchmarkSerialization compares the performance of old and new serialization methods
func benchmarkSerialization() {
fmt.Println("\n=== Template Serialization Benchmark ===")
// Create a more complex template to serialize
engine := twig.New()
source := `
@ -35,23 +35,23 @@ func benchmarkSerialization() {
{% endblock %}
`
engine.RegisterString("template", source)
// Compile the template
tmpl, _ := engine.Load("template")
compiled, _ := tmpl.Compile()
// Serialize using both methods
oldData, _ := oldGobSerialize(compiled)
newData, _ := twig.SerializeCompiledTemplate(compiled)
// Size comparison
fmt.Printf("Old format (gob) size: %d bytes\n", len(oldData))
fmt.Printf("New format (binary) size: %d bytes\n", len(newData))
fmt.Printf("Size reduction: %.2f%%\n\n", (1.0-float64(len(newData))/float64(len(oldData)))*100)
// Benchmark serialization
fmt.Println("Serialization Performance (1000 operations):")
// Old method
iterations := 1000
startOldSer := time.Now()
@ -59,42 +59,42 @@ func benchmarkSerialization() {
_, _ = oldGobSerialize(compiled)
}
oldSerTime := time.Since(startOldSer)
// New method
startNewSer := time.Now()
for i := 0; i < iterations; i++ {
_, _ = twig.SerializeCompiledTemplate(compiled)
}
newSerTime := time.Since(startNewSer)
fmt.Printf("Old serialization: %v (%.2f µs/op)\n", oldSerTime, float64(oldSerTime.Nanoseconds())/float64(iterations)/1000)
fmt.Printf("New serialization: %v (%.2f µs/op)\n", newSerTime, float64(newSerTime.Nanoseconds())/float64(iterations)/1000)
fmt.Printf("Serialization speedup: %.2fx\n\n", float64(oldSerTime.Nanoseconds())/float64(newSerTime.Nanoseconds()))
// Benchmark deserialization
fmt.Println("Deserialization Performance (1000 operations):")
// Old method
startOldDeser := time.Now()
for i := 0; i < iterations; i++ {
_, _ = oldGobDeserialize(oldData)
}
oldDeserTime := time.Since(startOldDeser)
// New method
startNewDeser := time.Now()
for i := 0; i < iterations; i++ {
_, _ = twig.DeserializeCompiledTemplate(newData)
}
newDeserTime := time.Since(startNewDeser)
fmt.Printf("Old deserialization: %v (%.2f µs/op)\n", oldDeserTime, float64(oldDeserTime.Nanoseconds())/float64(iterations)/1000)
fmt.Printf("New deserialization: %v (%.2f µs/op)\n", newDeserTime, float64(newDeserTime.Nanoseconds())/float64(iterations)/1000)
fmt.Printf("Deserialization speedup: %.2fx\n\n", float64(oldDeserTime.Nanoseconds())/float64(newDeserTime.Nanoseconds()))
// Total round-trip comparison
fmt.Println("Round-trip Performance (1000 operations):")
// Old method
startOldTotal := time.Now()
for i := 0; i < iterations; i++ {
@ -102,7 +102,7 @@ func benchmarkSerialization() {
_, _ = oldGobDeserialize(data)
}
oldTotalTime := time.Since(startOldTotal)
// New method
startNewTotal := time.Now()
for i := 0; i < iterations; i++ {
@ -110,11 +110,11 @@ func benchmarkSerialization() {
_, _ = twig.DeserializeCompiledTemplate(data)
}
newTotalTime := time.Since(startNewTotal)
fmt.Printf("Old total: %v (%.2f µs/op)\n", oldTotalTime, float64(oldTotalTime.Nanoseconds())/float64(iterations)/1000)
fmt.Printf("New total: %v (%.2f µs/op)\n", newTotalTime, float64(newTotalTime.Nanoseconds())/float64(iterations)/1000)
fmt.Printf("Overall speedup: %.2fx\n\n", float64(oldTotalTime.Nanoseconds())/float64(newTotalTime.Nanoseconds()))
// Memory usage estimation
templateCount := 100
fmt.Printf("Memory usage for %d templates:\n", templateCount)
@ -127,22 +127,22 @@ func benchmarkSerialization() {
// oldGobSerialize simulates the legacy gob-based serialization path, used
// here only as a baseline for benchmarking against the binary format.
func oldGobSerialize(compiled *twig.CompiledTemplate) ([]byte, error) {
	var out bytes.Buffer
	if err := gob.NewEncoder(&out).Encode(compiled); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// oldGobDeserialize simulates the old gob deserialization
func oldGobDeserialize(data []byte) (*twig.CompiledTemplate, error) {
	var tmpl twig.CompiledTemplate
	decoder := gob.NewDecoder(bytes.NewReader(data))
	if err := decoder.Decode(&tmpl); err != nil {
		return nil, err
	}
	return &tmpl, nil
}
}

View file

@ -7,9 +7,9 @@ import (
// main prints a timestamp header, runs the serialization benchmarks, and
// reports completion.
func main() {
	started := time.Now().Format(time.RFC1123)
	fmt.Printf("Running serialization benchmarks at %s\n", started)

	// Run benchmarks
	benchmarkSerialization()

	fmt.Println("\nBenchmarks completed successfully.")
}
}

View file

@ -69,14 +69,14 @@ func (c *CompiledTemplate) Size() int {
if c == nil {
return 0
}
// Calculate approximate size
size := 0
size += len(c.Name)
size += len(c.Source)
size += len(c.AST)
size += 16 // Size of int64 fields
return size
}
@ -159,7 +159,7 @@ func writeString(w io.Writer, s string) error {
if err := binary.Write(w, binary.LittleEndian, uint32(len(s))); err != nil {
return err
}
// Write the string data
_, err := w.Write([]byte(s))
return err
@ -172,13 +172,13 @@ func readString(r io.Reader) (string, error) {
if err := binary.Read(r, binary.LittleEndian, &length); err != nil {
return "", err
}
// Read string data
data := make([]byte, length)
if _, err := io.ReadFull(r, data); err != nil {
return "", err
}
return string(data), nil
}
@ -187,41 +187,41 @@ func SerializeCompiledTemplate(compiled *CompiledTemplate) ([]byte, error) {
// Get a buffer from the pool
buf := getBuffer()
defer putBuffer(buf)
// Use binary encoding for metadata (more efficient than gob)
// Write format version (for future compatibility)
if err := binary.Write(buf, binary.LittleEndian, uint8(1)); err != nil {
return nil, fmt.Errorf("failed to serialize version: %w", err)
}
// Write Name as length-prefixed string
if err := writeString(buf, compiled.Name); err != nil {
return nil, fmt.Errorf("failed to serialize name: %w", err)
}
// Write Source as length-prefixed string
if err := writeString(buf, compiled.Source); err != nil {
return nil, fmt.Errorf("failed to serialize source: %w", err)
}
// Write timestamps
if err := binary.Write(buf, binary.LittleEndian, compiled.LastModified); err != nil {
return nil, fmt.Errorf("failed to serialize LastModified: %w", err)
}
if err := binary.Write(buf, binary.LittleEndian, compiled.CompileTime); err != nil {
return nil, fmt.Errorf("failed to serialize CompileTime: %w", err)
}
// Write AST data length followed by data
if err := binary.Write(buf, binary.LittleEndian, uint32(len(compiled.AST))); err != nil {
return nil, fmt.Errorf("failed to serialize AST length: %w", err)
}
if _, err := buf.Write(compiled.AST); err != nil {
return nil, fmt.Errorf("failed to serialize AST data: %w", err)
}
// Return a copy of the buffer data
return bytes.Clone(buf.Bytes()), nil
}
@ -231,13 +231,13 @@ func DeserializeCompiledTemplate(data []byte) (*CompiledTemplate, error) {
if len(data) == 0 {
return nil, fmt.Errorf("empty data cannot be deserialized")
}
// Try the new binary format first
compiled, err := deserializeBinaryFormat(data)
if err == nil {
return compiled, nil
}
// Fall back to the old gob format if binary deserialization fails
// This ensures backward compatibility with previously compiled templates
return deserializeGobFormat(data)
@ -247,64 +247,64 @@ func DeserializeCompiledTemplate(data []byte) (*CompiledTemplate, error) {
// deserializeBinaryFormat decodes the length-prefixed binary format
// (version 1): a version byte, Name and Source as length-prefixed strings,
// the LastModified and CompileTime fields, then a uint32-prefixed AST blob.
// Returns an error describing the first field that fails to decode.
func deserializeBinaryFormat(data []byte) (*CompiledTemplate, error) {
	// Create a reader for the data
	r := bytes.NewReader(data)

	// Read and verify format version
	var version uint8
	if err := binary.Read(r, binary.LittleEndian, &version); err != nil {
		return nil, fmt.Errorf("failed to read format version: %w", err)
	}
	if version != 1 {
		return nil, fmt.Errorf("unsupported format version: %d", version)
	}

	// Create a new compiled template
	compiled := new(CompiledTemplate)

	// Read Name
	var err error
	compiled.Name, err = readString(r)
	if err != nil {
		return nil, fmt.Errorf("failed to read name: %w", err)
	}

	// Read Source
	compiled.Source, err = readString(r)
	if err != nil {
		return nil, fmt.Errorf("failed to read source: %w", err)
	}

	// Read timestamps
	if err := binary.Read(r, binary.LittleEndian, &compiled.LastModified); err != nil {
		return nil, fmt.Errorf("failed to read LastModified: %w", err)
	}
	if err := binary.Read(r, binary.LittleEndian, &compiled.CompileTime); err != nil {
		return nil, fmt.Errorf("failed to read CompileTime: %w", err)
	}

	// Read AST data length and data
	var astLength uint32
	if err := binary.Read(r, binary.LittleEndian, &astLength); err != nil {
		return nil, fmt.Errorf("failed to read AST length: %w", err)
	}
	// Guard against corrupt or truncated input: the declared AST length can
	// never exceed the bytes actually remaining, so validate before allocating
	// (an unchecked uint32 could force up to a 4 GiB allocation).
	if int64(astLength) > int64(r.Len()) {
		return nil, fmt.Errorf("declared AST length %d exceeds remaining data %d", astLength, r.Len())
	}
	compiled.AST = make([]byte, astLength)
	if _, err := io.ReadFull(r, compiled.AST); err != nil {
		return nil, fmt.Errorf("failed to read AST data: %w", err)
	}

	return compiled, nil
}
// deserializeGobFormat deserializes using the old gob format
func deserializeGobFormat(data []byte) (*CompiledTemplate, error) {
	var tmpl CompiledTemplate
	decoder := gob.NewDecoder(bytes.NewReader(data))
	if err := decoder.Decode(&tmpl); err != nil {
		return nil, fmt.Errorf("failed to deserialize compiled template: %w", err)
	}
	return &tmpl, nil
}

View file

@ -197,13 +197,13 @@ widgets.twig - UI widgets</pre>
// Register templates
templates := map[string]string{
"self_ref.twig": selfRefTemplate,
"scope.twig": scopeTemplate,
"context.twig": contextTemplate,
"library.twig": libraryTemplate,
"use_library.twig": useLibraryTemplate,
"from_import.twig": fromImportTemplate,
"optimization.twig": optimizationTemplate,
"self_ref.twig": selfRefTemplate,
"scope.twig": scopeTemplate,
"context.twig": contextTemplate,
"library.twig": libraryTemplate,
"use_library.twig": useLibraryTemplate,
"from_import.twig": fromImportTemplate,
"optimization.twig": optimizationTemplate,
}
for name, content := range templates {
@ -233,13 +233,13 @@ widgets.twig - UI widgets</pre>
for _, name := range []string{"self_ref.twig", "scope.twig", "context.twig", "use_library.twig", "from_import.twig", "optimization.twig"} {
fmt.Printf("\n----- %s -----\n\n", name)
err := engine.RenderTo(os.Stdout, name, context)
if err != nil {
fmt.Printf("Error rendering template %s: %v\n", name, err)
continue
}
fmt.Println("\n")
}
}
}

View file

@ -30,7 +30,7 @@ func (p *Parser) htmlPreservingTokenize() ([]Token, error) {
// Use a single substring for all pattern searches to reduce allocations
remainingSource := p.source[currentPosition:]
// Check for all possible tag starts, including whitespace control variants
positions := []struct {
pos int
@ -390,7 +390,7 @@ func tokenizeObjectContents(content string, tokens *[]Token, line int) {
commaCount++
}
}
// Pre-grow the tokens slice: each key-value pair creates about 4 tokens on average
estimatedTokenCount := len(*tokens) + (commaCount+1)*4
if cap(*tokens) < estimatedTokenCount {
@ -398,7 +398,7 @@ func tokenizeObjectContents(content string, tokens *[]Token, line int) {
copy(newTokens, *tokens)
*tokens = newTokens
}
// State tracking
inSingleQuote := false
inDoubleQuote := false
@ -419,13 +419,13 @@ func tokenizeObjectContents(content string, tokens *[]Token, line int) {
// Extract the key and value - reuse same slice memory
keyStr := content[start:colonPos]
keyStr = strings.TrimSpace(keyStr)
valueStr := content[colonPos+1:i]
valueStr := content[colonPos+1 : i]
valueStr = strings.TrimSpace(valueStr)
// Check key characteristics once to avoid multiple prefix/suffix checks
keyHasSingleQuotes := len(keyStr) >= 2 && keyStr[0] == '\'' && keyStr[len(keyStr)-1] == '\''
keyHasDoubleQuotes := len(keyStr) >= 2 && keyStr[0] == '"' && keyStr[len(keyStr)-1] == '"'
// Process the key
if keyHasSingleQuotes || keyHasDoubleQuotes {
// Quoted key - add as a string token
@ -449,7 +449,7 @@ func tokenizeObjectContents(content string, tokens *[]Token, line int) {
valueEndsWithBrace := len(valueStr) >= 1 && valueStr[len(valueStr)-1] == '}'
valueStartsWithBracket := len(valueStr) >= 2 && valueStr[0] == '['
valueEndsWithBracket := len(valueStr) >= 1 && valueStr[len(valueStr)-1] == ']'
// Process the value - more complex values need special handling
if valueStartsWithBrace && valueEndsWithBrace {
// Nested object
@ -610,7 +610,7 @@ func splitArrayElements(arrStr string) []string {
}
// Allocate with capacity for estimated number of elements
elements := make([]string, 0, commaCount+1)
// Pre-allocate the string builder with a reasonable capacity
// to avoid frequent reallocations
var current strings.Builder
@ -848,7 +848,7 @@ func (p *Parser) tokenizeAndAppend(source string, tokens *[]Token, line int) {
// tokenizeExpression handles tokenizing expressions inside Twig tags
func (p *Parser) tokenizeExpression(expr string, tokens *[]Token, line int) {
// This is a tokenizer for expressions that handles Twig syntax properly
// First, pre-grow the tokens slice to minimize reallocations
// Estimate: one token per 5 characters in the expression (rough average)
estimatedTokenCount := len(*tokens) + len(expr)/5 + 1
@ -861,7 +861,7 @@ func (p *Parser) tokenizeExpression(expr string, tokens *[]Token, line int) {
// Pre-allocate the string builder with a reasonable capacity
var currentToken strings.Builder
currentToken.Grow(16) // Reasonable size for most identifiers/numbers
var inString bool
var stringDelimiter byte
@ -883,7 +883,7 @@ func (p *Parser) tokenizeExpression(expr string, tokens *[]Token, line int) {
if currentToken.Len() > 0 {
// Reuse the tokenValue to avoid extra allocations
tokenValue := currentToken.String()
// Check if the token is a number
if onlyContainsDigitsOrDot(tokenValue) {
*tokens = append(*tokens, Token{Type: TOKEN_NUMBER, Value: tokenValue, Line: line})
@ -943,21 +943,21 @@ func (p *Parser) tokenizeExpression(expr string, tokens *[]Token, line int) {
nextChar := expr[i+1]
isTwoChar := false
var twoChar string
// Avoiding string concatenation and using direct comparison
if (c == '=' && nextChar == '=') ||
(c == '!' && nextChar == '=') ||
(c == '>' && nextChar == '=') ||
(c == '<' && nextChar == '=') ||
(c == '&' && nextChar == '&') ||
(c == '|' && nextChar == '|') ||
(c == '?' && nextChar == '?') {
(c == '!' && nextChar == '=') ||
(c == '>' && nextChar == '=') ||
(c == '<' && nextChar == '=') ||
(c == '&' && nextChar == '&') ||
(c == '|' && nextChar == '|') ||
(c == '?' && nextChar == '?') {
// Only allocate the string when we need it
twoChar = string([]byte{c, nextChar})
isTwoChar = true
}
if isTwoChar {
*tokens = append(*tokens, Token{Type: TOKEN_OPERATOR, Value: twoChar, Line: line})
i++ // Skip the next character
@ -1013,7 +1013,7 @@ func (p *Parser) tokenizeExpression(expr string, tokens *[]Token, line int) {
// Pre-allocate for numeric tokens
currentToken.Grow(10) // Reasonable for most numbers
// Handle negative sign if present
if c == '-' {
currentToken.WriteByte(c)
@ -1052,7 +1052,7 @@ func (p *Parser) tokenizeExpression(expr string, tokens *[]Token, line int) {
// Add any final token
if currentToken.Len() > 0 {
tokenValue := currentToken.String()
// Check if the final token is a special keyword
// Use direct comparison instead of multiple string checks
if tokenValue == "true" || tokenValue == "false" ||

View file

@ -289,4 +289,4 @@ func TestNestedMacros(t *testing.T) {
t.Errorf("Expected element %q not found in result: %s", element, result)
}
}
}
}

10
node.go
View file

@ -749,7 +749,7 @@ func (n *ExtendsNode) Render(w io.Writer, ctx *RenderContext) error {
// This ensures the parent template knows it's being extended and preserves our blocks
parentCtx := NewRenderContext(ctx.env, ctx.context, ctx.engine)
parentCtx.extending = true // Flag that the parent is being extended
// Ensure the context is released even if an error occurs
defer parentCtx.Release()
@ -1030,7 +1030,7 @@ func (n *MacroNode) CallMacro(w io.Writer, ctx *RenderContext, args ...interface
// Create a new context for the macro
macroCtx := NewRenderContext(ctx.env, nil, ctx.engine)
macroCtx.parent = ctx
// Ensure context is released even in error paths
defer macroCtx.Release()
@ -1111,7 +1111,7 @@ func (n *ImportNode) Render(w io.Writer, ctx *RenderContext) error {
// Create a new context for the imported template
importCtx := NewRenderContext(ctx.env, nil, ctx.engine)
// Ensure context is released even in error paths
defer importCtx.Release()
@ -1131,7 +1131,7 @@ func (n *ImportNode) Render(w io.Writer, ctx *RenderContext) error {
// Set the module variable in the current context
ctx.SetVariable(n.module, macros)
return nil
}
@ -1174,7 +1174,7 @@ func (n *FromImportNode) Render(w io.Writer, ctx *RenderContext) error {
// Create a new context for the imported template
importCtx := NewRenderContext(ctx.env, nil, ctx.engine)
// Ensure context is released even in error paths
defer importCtx.Release()

101
render.go
View file

@ -8,9 +8,11 @@ import (
"math"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
)
// RenderContext holds the state during template rendering
@ -884,21 +886,64 @@ type attributeCacheKey struct {
// attributeCacheEntry represents a cached attribute lookup result
type attributeCacheEntry struct {
fieldIndex int // Index of the field (-1 if not a field)
isMethod bool // Whether this is a method
methodIndex int // Index of the method (-1 if not a method)
ptrMethod bool // Whether the method is on the pointer type
fieldIndex int // Index of the field (-1 if not a field)
isMethod bool // Whether this is a method
methodIndex int // Index of the method (-1 if not a method)
ptrMethod bool // Whether the method is on the pointer type
lastAccess time.Time // When this entry was last accessed
accessCount int // How many times this entry has been accessed
}
// attributeCache caches attribute lookups by type and attribute name
// Uses a simplified LRU strategy for eviction - when cache fills up,
// we remove 10% of the least recently used entries to make room
var attributeCache = struct {
sync.RWMutex
m map[attributeCacheKey]attributeCacheEntry
maxSize int // Maximum number of entries to cache
currSize int // Current number of entries
m map[attributeCacheKey]attributeCacheEntry
maxSize int // Maximum number of entries to cache
currSize int // Current number of entries
evictionPct float64 // Percentage of cache to evict when full (0.0-1.0)
}{
m: make(map[attributeCacheKey]attributeCacheEntry),
maxSize: 1000, // Limit cache to 1000 entries to prevent unbounded growth
m: make(map[attributeCacheKey]attributeCacheEntry),
maxSize: 1000, // Limit cache to 1000 entries to prevent unbounded growth
evictionPct: 0.1, // Evict 10% of entries when cache is full
}
// evictLRUEntries removes the least recently/frequently used entries from
// the attribute cache. Entries whose access count is far below a peer's are
// evicted first; ties on frequency are broken by recency (oldest access
// goes first). This function assumes that the caller holds the
// attributeCache write lock.
func evictLRUEntries() {
	// Calculate how many entries to evict
	numToEvict := int(float64(attributeCache.maxSize) * attributeCache.evictionPct)
	if numToEvict < 1 {
		numToEvict = 1 // Always evict at least one entry
	}

	// Snapshot the cache so entries can be ordered for eviction
	type cacheItem struct {
		key   attributeCacheKey
		entry attributeCacheEntry
	}
	entries := make([]cacheItem, 0, attributeCache.currSize)
	for k, v := range attributeCache.m {
		entries = append(entries, cacheItem{k, v})
	}

	// Sort so the best eviction candidates come first
	sort.Slice(entries, func(i, j int) bool {
		ci := entries[i].entry.accessCount
		cj := entries[j].entry.accessCount
		// If access counts differ significantly (2x), prefer keeping the
		// frequently accessed item. The check is applied in BOTH directions
		// so the less function stays consistent: the original one-sided test
		// was asymmetric, which sort.Slice does not support (order becomes
		// unspecified) and could rank a hot-but-old entry ahead of a cold one.
		if ci < cj/2 {
			return true
		}
		if cj < ci/2 {
			return false
		}
		// Otherwise, use recency as the deciding factor
		return entries[i].entry.lastAccess.Before(entries[j].entry.lastAccess)
	})

	// Remove the oldest entries
	for i := 0; i < numToEvict && i < len(entries); i++ {
		delete(attributeCache.m, entries[i].key)
		attributeCache.currSize--
	}
}
// getItem gets an item from a container (array, slice, map) by index or key
@ -1022,31 +1067,43 @@ func (ctx *RenderContext) getAttribute(obj interface{}, attr string) (interface{
// Get a read lock to check the cache first
attributeCache.RLock()
entry, found := attributeCache.m[key]
attributeCache.RUnlock()
if found {
// Found in cache, update access stats later with a write lock
attributeCache.RUnlock()
// If not found, perform the reflection lookup with proper locking
if !found {
// Get a write lock for updating the cache
// Update the entry's access statistics with a write lock
attributeCache.Lock()
// Need to check again after acquiring write lock
if cachedEntry, stillExists := attributeCache.m[key]; stillExists {
// Update access time and count
cachedEntry.lastAccess = time.Now()
cachedEntry.accessCount++
attributeCache.m[key] = cachedEntry
entry = cachedEntry
}
attributeCache.Unlock()
} else {
// Not found in cache - release read lock and get write lock for update
attributeCache.RUnlock()
attributeCache.Lock()
// Double-check if another goroutine added it while we were waiting
entry, found = attributeCache.m[key]
if !found {
// Still not found, need to populate the cache
// Check if cache has reached maximum size
if attributeCache.currSize >= attributeCache.maxSize {
// Cache is full, remove a random entry
// This is a simple strategy - in the future, we could use LRU
for k := range attributeCache.m {
delete(attributeCache.m, k)
attributeCache.currSize--
// Just remove one entry for now
break
}
// Cache is full, use our LRU eviction strategy
evictLRUEntries()
}
// Create a new entry with current timestamp
entry = attributeCacheEntry{
fieldIndex: -1,
methodIndex: -1,
lastAccess: time.Now(),
accessCount: 1,
}
// Look for a field

View file

@ -9,7 +9,7 @@ func BenchmarkWriteStringDirect(b *testing.B) {
buf := NewStringBuffer()
defer buf.Release()
longStr := "This is a test string for benchmarking the write performance of direct byte slice conversion"
b.ResetTimer()
for i := 0; i < b.N; i++ {
buf.buf.Reset()
@ -21,7 +21,7 @@ func BenchmarkWriteStringOptimized(b *testing.B) {
buf := NewStringBuffer()
defer buf.Release()
longStr := "This is a test string for benchmarking the write performance of optimized string writing"
b.ResetTimer()
for i := 0; i < b.N; i++ {
buf.buf.Reset()
@ -31,7 +31,7 @@ func BenchmarkWriteStringOptimized(b *testing.B) {
func BenchmarkWriteStringDirect_Discard(b *testing.B) {
longStr := "This is a test string for benchmarking the write performance of direct byte slice conversion"
b.ResetTimer()
for i := 0; i < b.N; i++ {
ioutil.Discard.Write([]byte(longStr))
@ -40,7 +40,7 @@ func BenchmarkWriteStringDirect_Discard(b *testing.B) {
func BenchmarkWriteStringOptimized_Discard(b *testing.B) {
longStr := "This is a test string for benchmarking the write performance of optimized string writing"
b.ResetTimer()
for i := 0; i < b.N; i++ {
WriteString(ioutil.Discard, longStr)

View file

@ -32,11 +32,11 @@ func WriteString(w io.Writer, s string) (int, error) {
if sw, ok := w.(io.StringWriter); ok {
return sw.WriteString(s)
}
// Fallback path - reuse buffer from pool to avoid allocation
buf := GetByteBuffer()
buf.WriteString(s)
n, err := w.Write(buf.Bytes())
PutByteBuffer(buf)
return n, err
}
}