@@ -34,7 +34,7 @@ type LlamaServer interface {
 	WaitUntilRunning(ctx context.Context) error
 	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
 	Embedding(ctx context.Context, prompt string) ([]float64, error)
-	Embed(ctx context.Context, input []string) ([][]float64, error)
+	Embed(ctx context.Context, input []string) ([][]float32, error)
 	Tokenize(ctx context.Context, content string) ([]int, error)
 	Detokenize(ctx context.Context, tokens []int) (string, error)
 	Close() error
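As a point of reference, the sketch below shows how a caller might consume the updated `Embed` signature, which now returns one `[]float32` vector per input string. The `Embedder` interface and `embedBatch` helper are illustrative assumptions, not code from this change.

```go
// Package embedexample is a hedged sketch, not part of the diff above.
package embedexample

import (
	"context"
	"fmt"
)

// Embedder mirrors just the method this diff changes: Embed now returns
// [][]float32 (one vector per input string) instead of [][]float64.
type Embedder interface {
	Embed(ctx context.Context, input []string) ([][]float32, error)
}

// embedBatch is a hypothetical caller used only for illustration.
func embedBatch(ctx context.Context, s Embedder, inputs []string) error {
	embeddings, err := s.Embed(ctx, inputs)
	if err != nil {
		return err
	}
	for i, e := range embeddings {
		fmt.Printf("input %d -> %d-dimensional float32 vector\n", i, len(e))
	}
	return nil
}
```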
@@ -847,10 +847,10 @@ type EmbedRequest struct {
 }

 type EmbedResponse struct {
-	Embedding [][]float64 `json:"embedding"`
+	Embedding [][]float32 `json:"embedding"`
 }

-func (s *llmServer) Embed(ctx context.Context, input []string) ([][]float64, error) {
+func (s *llmServer) Embed(ctx context.Context, input []string) ([][]float32, error) {
 	if err := s.sem.Acquire(ctx, 1); err != nil {
 		slog.Error("Failed to acquire semaphore", "error", err)
 		return nil, err