* bf16

* tests

* gpt-oss

* enable gptoss for engine

* rough estimate

* convert to mxfp4

* handle safetensors U8

* clamp glu/linear

* update tokenizer

* MXFP4 support

This implements the Open Compute Project (OCP) Microscaling (MX) FP4 format
as a tensor type, with backend implementations focusing
on mul_mat and mul_mat_id on CPU, CUDA, and Metal.
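
For reviewers unfamiliar with the format: MX FP4 packs elements in blocks of 32, each element a 4-bit E2M1 value (two per byte) with one shared 8-bit E8M0 power-of-two scale per block. Below is a minimal dequantization sketch assuming that layout; the nibble order and the exact struct layout ggml uses are assumptions, not a copy of the implementation.

```go
package main

import (
	"fmt"
	"math"
)

// signed E2M1 values indexed by the 4-bit code (sign bit is bit 3)
var e2m1 = [16]float32{0, 0.5, 1, 1.5, 2, 3, 4, 6, 0, -0.5, -1, -1.5, -2, -3, -4, -6}

// dequantMXFP4 expands one 32-element MX block: 16 bytes of packed nibbles
// plus one shared E8M0 scale (a biased power-of-two exponent).
func dequantMXFP4(scale byte, packed [16]byte) [32]float32 {
	s := float32(math.Exp2(float64(int(scale) - 127)))
	var out [32]float32
	for i, b := range packed {
		out[2*i] = e2m1[b&0x0F] * s // low nibble first (assumed)
		out[2*i+1] = e2m1[b>>4] * s
	}
	return out
}

func main() {
	blk := [16]byte{0x21}         // codes 1 and 2 -> magnitudes 0.5 and 1.0
	out := dequantMXFP4(127, blk) // E8M0 value 127 -> scale 2^0
	fmt.Println(out[0], out[1])   // 0.5 1
}
```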

* Unit tests for MXFP4 support

This exercises various operations and shapes on both CPU and GPU (if detected
on the system)

* cuda graph

* unit test adjustments

* cuda: optimize memory access

Read 4 bytes at a time (8 elements) when performing mul_mat_vec_mxfp4
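
The win here is load width: one 32-bit word already holds eight packed FP4 codes, so a single word-sized read replaces several narrower ones. A rough sketch of the unpacking side of that access pattern (the real kernel is CUDA and fuses this with the dot product; the name below is hypothetical):

```go
// unpack8 pulls the eight 4-bit quantized codes out of one 32-bit word.
func unpack8(word uint32) [8]uint8 {
	var codes [8]uint8
	for i := range codes {
		codes[i] = uint8((word >> (4 * i)) & 0xF)
	}
	return codes
}
```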

* mac: fix crash on old macos versions

cblas_sgemm is only supported on macOS v13.3 and up; however, bf16 is
only supported on v14+, so we were falling back to ggml-blas and
crashing on bf16 tensors. Checking for the function being null
seems to be the simplest way to conditionally avoid registering the
backend.

* server: Minimum context length for gptoss

This model requires a minimum context length of 8192 to function
effectively. Users can set higher values through all the normal mechanisms,
but lower values will be silently raised to this minimum.

* ggml: Multiply by numParallel for gptoss sliding window

When computing the graph size estimate, the context size has already been
multiplied by numParallel, so estimates reflect that. However, since
sliding-window models use a smaller, fixed context size, they need
to take numParallel into account manually.
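
A hedged sketch of the adjustment being described; the names below are illustrative, not the actual estimate code:

```go
// effectiveCacheLen returns the per-sequence cache length used in the graph
// size estimate. Full-context models already have numParallel folded into
// numCtx; sliding-window models use a fixed window, so scale it here.
func effectiveCacheLen(numCtx, slidingWindow, numParallel int) int {
	if slidingWindow > 0 && slidingWindow < numCtx {
		return slidingWindow * numParallel
	}
	return numCtx // already includes numParallel
}
```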

* gpt-oss integration

Includes the harmony parser, thinking levels, etc.

* fix sync

* fix tests

* fix lint

---------

Co-authored-by: Daniel Hiltgen <daniel@ollama.com>
Co-authored-by: Jesse Gross <jesse@ollama.com>
Co-authored-by: Devon Rifkin <drifkin@drifkin.net>
Author: Michael Yang
Date: 2025-08-05 12:21:16 -07:00
Committed by: GitHub
Parent: 0d38b66502
Commit: fa7776fd24
56 changed files with 6670 additions and 328 deletions

server/harmonyparser.go (new file)

@@ -0,0 +1,379 @@
package server
import (
"context"
"log/slog"
"strings"
"unicode"
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/logutil"
)
type harmonyParserState int
const (
harmonyParserState_LookingForMessageStart harmonyParserState = iota
harmonyParserState_ParsingHeader
harmonyParserState_ParsingContent
)
func shouldUseHarmony(model Model) bool {
if model.Config.ModelFamily == "gptoss" {
// heuristic to check whether the template expects to be parsed via harmony:
// search for harmony tags that are nearly always used
if model.Template.Contains("<|start|>") && model.Template.Contains("<|end|>") {
return true
}
}
return false
}
func (s harmonyParserState) String() string {
switch s {
// we're looking for the message start tag
case harmonyParserState_LookingForMessageStart:
return "LookingForMessageStart"
case harmonyParserState_ParsingHeader:
return "ParsingHeader"
case harmonyParserState_ParsingContent:
return "ParsingContent"
default:
return "Unknown"
}
}
type HarmonyParser struct {
state harmonyParserState
MessageStartTag string
MessageEndTag string
HeaderEndTag string
acc strings.Builder
lifetimeAcc strings.Builder
}
type HarmonyEvent interface {
isHarmonyEvent()
}
type HarmonyEventMessageStart struct{}
func (HarmonyEventMessageStart) isHarmonyEvent() {}
type HarmonyEventHeaderComplete struct {
Header HarmonyHeader
}
func (HarmonyEventHeaderComplete) isHarmonyEvent() {}
type HarmonyEventContentEmitted struct {
Content string
}
func (HarmonyEventContentEmitted) isHarmonyEvent() {}
type HarmonyEventMessageEnd struct{}
func (HarmonyEventMessageEnd) isHarmonyEvent() {}
type HarmonyHeader struct {
Role string
Channel string
Recipient string
}
func (s *HarmonyParser) AddImplicitStart() {
s.acc.WriteString("<|start|>assistant")
}
func (s *HarmonyParser) AddImplicitStartOrPrefill(lastMessage *api.Message) {
if lastMessage != nil && lastMessage.Role == "assistant" {
// handle prefilling conditions
if lastMessage.Content != "" {
s.acc.WriteString("<|start|>assistant<|channel|>final<|message|>")
return
} else if lastMessage.Thinking != "" {
s.acc.WriteString("<|start|>assistant<|channel|>analysis<|message|>")
return
}
}
s.AddImplicitStart()
}
func (s *HarmonyParser) AddContent(content string) []HarmonyEvent {
s.lifetimeAcc.WriteString(content)
s.acc.WriteString(content)
var events []HarmonyEvent
keepLooping := true
// we loop because we might pass through multiple parsing states in a single
// call to addContent, and we want to make sure callers don't have to wait for
// data that's already unambiguous
for keepLooping {
var newEvents []HarmonyEvent
newEvents, keepLooping = eat(s)
events = append(events, newEvents...)
}
return events
}
// the additional bool return is true iff we should continue eating
func eat(s *HarmonyParser) ([]HarmonyEvent, bool) {
switch s.state {
case harmonyParserState_LookingForMessageStart:
// does the acc contain the message start tag?
if strings.Contains(s.acc.String(), s.MessageStartTag) {
// split the acc into the message start tag and the rest
split := strings.SplitN(s.acc.String(), s.MessageStartTag, 2)
before := split[0]
if before != "" {
slog.Warn("harmony parser: found message start tag in the middle of the content", "content", s.acc.String())
}
after := split[1]
s.acc.Reset()
s.acc.WriteString(after)
s.state = harmonyParserState_ParsingHeader
return []HarmonyEvent{HarmonyEventMessageStart{}}, true
}
// no match, so we keep accumulating
return nil, false
case harmonyParserState_ParsingHeader:
if strings.Contains(s.acc.String(), s.HeaderEndTag) {
split := strings.SplitN(s.acc.String(), s.HeaderEndTag, 2)
header := split[0]
after := split[1]
s.acc.Reset()
s.acc.WriteString(after)
s.state = harmonyParserState_ParsingContent
return []HarmonyEvent{HarmonyEventHeaderComplete{Header: s.parseHeader(header)}}, true
}
return nil, false
case harmonyParserState_ParsingContent:
if strings.Contains(s.acc.String(), s.MessageEndTag) {
// if we already have the message end tag, we can emit the content up to it
split := strings.SplitN(s.acc.String(), s.MessageEndTag, 2)
content := split[0]
after := split[1]
s.acc.Reset()
s.acc.WriteString(after)
s.state = harmonyParserState_LookingForMessageStart
events := []HarmonyEvent{}
if content != "" {
events = append(events, HarmonyEventContentEmitted{Content: content})
}
events = append(events, HarmonyEventMessageEnd{})
return events, true
} else if overlapLen := overlap(s.acc.String(), s.MessageEndTag); overlapLen > 0 {
// if our suffix contains the start of the message end tag, we can emit
// the content up to the start of the message end tag
content := s.acc.String()[:len(s.acc.String())-overlapLen]
remaining := s.acc.String()[len(s.acc.String())-overlapLen:]
s.acc.Reset()
s.acc.WriteString(remaining)
// emit the content we know isn't part of the message end tag, and keep
// accumulating to disambiguate the rest
if content == "" {
return nil, false
}
return []HarmonyEvent{HarmonyEventContentEmitted{Content: content}}, false
} else {
// no end tag, so it's still normal content that we can immediately emit
content := s.acc.String()
if content == "" {
return nil, false
}
s.acc.Reset()
return []HarmonyEvent{HarmonyEventContentEmitted{Content: content}}, false
}
}
return nil, false
}
func (s *HarmonyParser) parseHeader(raw string) HarmonyHeader {
harmonyHeader := HarmonyHeader{}
// if `<|constrain|>` is present, ensure it has a space before it so it gets
// parsed as a separate token, even if the model didn't include the space
if strings.Contains(raw, "<|constrain|>") {
raw = strings.Replace(raw, "<|constrain|>", " <|constrain|>", 1)
raw = strings.TrimSpace(raw)
}
// look for the optional channel tag, which is `<|channel|>` followed by the
// channel name, all without any whitespace
channelIndex := strings.Index(raw, "<|channel|>")
if channelIndex != -1 {
before := raw[:channelIndex]
after := raw[channelIndex+len("<|channel|>"):]
// the channel name is `after` all the way up to the first (if any) whitespace character
idx := strings.IndexFunc(after, func(r rune) bool {
return unicode.IsSpace(r)
})
if idx == -1 {
idx = len(after)
}
harmonyHeader.Channel = after[:idx]
after = after[idx:]
// now we remove the channel tag from the raw string to further process
raw = before + after
raw = strings.TrimSpace(raw)
}
// split the header into whitespace-separated tokens
tokens := strings.Fields(raw)
// the first token is treated as the role
if len(tokens) == 0 {
slog.Error("harmony parser: missing role in header", "header", raw)
return harmonyHeader
}
role := tokens[0]
tokens = tokens[1:]
// special case: if role starts with to= then it's a tool call
if strings.HasPrefix(role, "to=") {
harmonyHeader.Recipient = role[3:]
harmonyHeader.Role = "tool"
} else {
harmonyHeader.Role = role
}
// the recipient (if any) can be specified before or after the channel tag, so
// we check it at the end once we've already parsed the channel and role
if harmonyHeader.Recipient == "" && len(tokens) > 0 && strings.HasPrefix(tokens[0], "to=") {
harmonyHeader.Recipient = tokens[0][3:]
}
return harmonyHeader
}
// longest overlap between suffix of s and prefix of delim
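// e.g. overlap("Hello<|e", "<|end|>") == 3: the trailing "<|e" could be the start of the end tag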
func overlap(s, delim string) int {
max := min(len(delim), len(s))
for i := max; i > 0; i-- {
if strings.HasSuffix(s, delim[:i]) {
return i
}
}
return 0
}
// harmonyMessageState represents the current state of message processing
type harmonyMessageState int
const (
harmonyMessageState_Normal harmonyMessageState = iota
harmonyMessageState_Thinking
harmonyMessageState_ToolCalling
)
// HarmonyMessageHandler processes harmony events and accumulates content appropriately.
// This is a higher level interface that maps harmony concepts into ollama concepts
type HarmonyMessageHandler struct {
state harmonyMessageState
harmonyParser *HarmonyParser
}
// NewHarmonyMessageHandler creates a new message handler
func NewHarmonyMessageHandler() *HarmonyMessageHandler {
return &HarmonyMessageHandler{
state: harmonyMessageState_Normal,
harmonyParser: &HarmonyParser{
MessageStartTag: "<|start|>",
MessageEndTag: "<|end|>",
HeaderEndTag: "<|message|>",
},
}
}
// AddContent processes the content and returns the content, thinking, and tool content.
// content and thinking are already fully parsed, but tool content still needs to be passed to the tool parser
func (h *HarmonyMessageHandler) AddContent(content string, toolParser *HarmonyToolCallAccumulator) (string, string, string) {
contentSb := strings.Builder{}
thinkingSb := strings.Builder{}
toolContentSb := strings.Builder{}
events := h.harmonyParser.AddContent(content)
for _, event := range events {
switch event := event.(type) {
case HarmonyEventHeaderComplete:
slog.Log(context.TODO(), logutil.LevelTrace, "harmony event header complete", "header", event.Header)
switch event.Header.Channel {
case "analysis":
if event.Header.Recipient != "" {
h.state = harmonyMessageState_ToolCalling
// event.Header.Recipient is the tool name, something like
// "browser.search" for a built-in, or "functions.calc" for a
// custom one
toolParser.SetToolName(event.Header.Recipient)
} else {
h.state = harmonyMessageState_Thinking
}
case "commentary":
if event.Header.Recipient != "" {
h.state = harmonyMessageState_ToolCalling
toolParser.SetToolName(event.Header.Recipient)
} else {
h.state = harmonyMessageState_Normal
}
case "final":
h.state = harmonyMessageState_Normal
}
case HarmonyEventContentEmitted:
slog.Log(context.TODO(), logutil.LevelTrace, "harmony event content", "content", event.Content, "state", h.state)
if h.state == harmonyMessageState_Normal {
contentSb.WriteString(event.Content)
} else if h.state == harmonyMessageState_Thinking {
thinkingSb.WriteString(event.Content)
} else if h.state == harmonyMessageState_ToolCalling {
toolContentSb.WriteString(event.Content)
}
case HarmonyEventMessageEnd:
h.state = harmonyMessageState_Normal
}
}
return contentSb.String(), thinkingSb.String(), toolContentSb.String()
}
func (h *HarmonyMessageHandler) CreateToolParser() *HarmonyToolCallAccumulator {
return &HarmonyToolCallAccumulator{
state: harmonyToolCallState_Normal,
currentToolName: nil,
}
}
type harmonyToolCallState int
const (
harmonyToolCallState_Normal harmonyToolCallState = iota
harmonyToolCallState_ToolCalling
)
type HarmonyToolCallAccumulator struct {
state harmonyToolCallState
acc strings.Builder
currentToolName *string
}
func (a *HarmonyToolCallAccumulator) SetToolName(toolName string) {
a.currentToolName = &toolName
}
func (a *HarmonyToolCallAccumulator) Add(content string) {
a.acc.WriteString(content)
}
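// Drain returns the pending tool name (if one was set) and the raw accumulated arguments, then resets the accumulator's buffer and state.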
func (a *HarmonyToolCallAccumulator) Drain() (*string, string) {
str := a.acc.String()
a.state = harmonyToolCallState_Normal
a.acc.Reset()
return a.currentToolName, str
}
func (a *HarmonyToolCallAccumulator) Content() string {
return a.acc.String()
}
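
For orientation, a hedged usage sketch (written as a Go example test, not part of this change) showing how the streaming parser above is driven chunk by chunk; HarmonyMessageHandler.AddContent does essentially this internally:

```go
package server

import "fmt"

func ExampleHarmonyParser() {
	parser := HarmonyParser{
		MessageStartTag: "<|start|>",
		MessageEndTag:   "<|end|>",
		HeaderEndTag:    "<|message|>",
	}
	parser.AddImplicitStart()
	for _, chunk := range []string{"<|channel|>analysis<|mess", "age|>thinking...<|end|>"} {
		for _, ev := range parser.AddContent(chunk) {
			switch ev := ev.(type) {
			case HarmonyEventHeaderComplete:
				fmt.Println("channel:", ev.Header.Channel)
			case HarmonyEventContentEmitted:
				fmt.Println("content:", ev.Content)
			case HarmonyEventMessageEnd:
				fmt.Println("message end")
			}
		}
	}
	// Output:
	// channel: analysis
	// content: thinking...
	// message end
}
```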


@@ -0,0 +1,469 @@
package server
import (
"fmt"
"reflect"
"testing"
)
func TestHeaderParsing(t *testing.T) {
tests := []struct {
in, wantRole, wantChannel, wantRecipient string
}{
{
in: "assistant<|channel|>analysis",
wantRole: "assistant",
wantChannel: "analysis",
wantRecipient: "",
},
{
in: "assistant<|channel|>analysis to=functions.get_weather",
wantRole: "assistant",
wantChannel: "analysis",
wantRecipient: "functions.get_weather",
},
{
in: "assistant to=functions.get_weather<|channel|>analysis",
wantRole: "assistant",
wantChannel: "analysis",
wantRecipient: "functions.get_weather",
},
// special case where the role is replaced by the recipient (matches reference code)
{
in: "to=functions.get_weather<|channel|>analysis",
wantRole: "tool",
wantChannel: "analysis",
wantRecipient: "functions.get_weather",
},
// extra token after the recipient is ignored
{
in: "assistant to=functions.get_weather abc<|channel|>analysis",
wantRole: "assistant",
wantChannel: "analysis",
wantRecipient: "functions.get_weather",
},
// with constrain tag, recipient after channel tag
{
in: "assistant<|channel|>commentary to=functions.get_weather <|constrain|>json",
wantRole: "assistant",
wantChannel: "commentary",
wantRecipient: "functions.get_weather",
},
// with constrain tag, recipient before channel tag
{
in: "assistant to=functions.get_weather<|channel|>commentary <|constrain|>json",
wantRole: "assistant",
wantChannel: "commentary",
wantRecipient: "functions.get_weather",
},
// constrain tag without space
{
in: "assistant<|channel|>commentary to=functions.get_weather<|constrain|>json",
wantRole: "assistant",
wantChannel: "commentary",
wantRecipient: "functions.get_weather",
},
// constrain tag without space, different order
{
in: "assistant to=functions.get_weather<|channel|>commentary<|constrain|>json",
wantRole: "assistant",
wantChannel: "commentary",
wantRecipient: "functions.get_weather",
},
}
for i, tt := range tests {
parser := HarmonyParser{
MessageStartTag: "<|start|>",
MessageEndTag: "<|end|>",
HeaderEndTag: "<|message|>",
}
header := parser.parseHeader(tt.in)
if header.Role != tt.wantRole {
t.Errorf("case %d: got role \"%s\", want \"%s\"", i, header.Role, tt.wantRole)
}
if header.Channel != tt.wantChannel {
t.Errorf("case %d: got channel \"%s\", want \"%s\"", i, header.Channel, tt.wantChannel)
}
if header.Recipient != tt.wantRecipient {
t.Errorf("case %d: got recipient \"%s\", want \"%s\"", i, header.Recipient, tt.wantRecipient)
}
}
}
func TestHarmonyParserHeaderEvent(t *testing.T) {
tests := []struct {
in, wantRole, wantChannel, wantRecipient string
implicitStart bool
}{
{
in: "<|start|>user<|message|>What is 2 + 2?<|end|>",
wantRole: "user",
wantChannel: "",
wantRecipient: "",
},
{
in: "<|start|>assistant<|channel|>analysis<|message|>What is 2 + 2?<|end|>",
wantRole: "assistant",
wantChannel: "analysis",
wantRecipient: "",
},
{
in: "<|start|>assistant<|channel|>commentary to=functions.get_weather <|constrain|>json<|message|>{\"location\":\"San Francisco\"}<|call|><|start|>functions.get_weather to=assistant<|message|>{\"sunny\": true, \"temperature\": 20}<|end|>",
wantRole: "assistant",
wantChannel: "commentary",
wantRecipient: "functions.get_weather",
},
{
in: "<|channel|>analysis<|message|>User asks weather in SF. We need location. Use get_current_weather with location \"San Francisco, CA\".<|end|><|start|>assistant<|channel|>commentary to=functions.get_current_weather <|constrain|>json<|message|>{\"location\":\"San Francisco, CA\"}<|call|>",
wantRole: "assistant",
wantChannel: "analysis",
wantRecipient: "",
implicitStart: true,
},
}
for i, tt := range tests {
parser := HarmonyParser{
MessageStartTag: "<|start|>",
MessageEndTag: "<|end|>",
HeaderEndTag: "<|message|>",
}
if tt.implicitStart {
parser.AddImplicitStart()
}
gotEvents := parser.AddContent(tt.in)
if len(gotEvents) == 0 {
t.Errorf("case %d: got no events, want at least one", i)
}
var firstHeaderEvent *HarmonyEventHeaderComplete
// print events
for _, event := range gotEvents {
fmt.Printf("event: %+v\n", event)
}
for _, event := range gotEvents {
if event, ok := event.(HarmonyEventHeaderComplete); ok {
firstHeaderEvent = &event
break
}
}
if firstHeaderEvent == nil {
t.Errorf("case %d: got no header complete event, want one", i)
continue
}
gotHeader := firstHeaderEvent.Header
if gotHeader.Role != tt.wantRole || gotHeader.Channel != tt.wantChannel || gotHeader.Recipient != tt.wantRecipient {
t.Errorf("case %d: got header %+v, want role=%s channel=%s recipient=%s", i, gotHeader, tt.wantRole, tt.wantChannel, tt.wantRecipient)
}
}
}
func TestHarmonyParserNonStreaming(t *testing.T) {
tests := []struct {
in string
implicitStart bool
wantEvents []HarmonyEvent
}{
{
in: "<|start|>user<|message|>What is 2 + 2?<|end|>",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}},
HarmonyEventContentEmitted{Content: "What is 2 + 2?"},
HarmonyEventMessageEnd{},
},
},
{
in: "<|start|>assistant<|channel|>analysis<|message|>The answer is 4<|end|>",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "analysis", Recipient: ""}},
HarmonyEventContentEmitted{Content: "The answer is 4"},
HarmonyEventMessageEnd{},
},
},
{
in: "<|start|>assistant<|channel|>commentary to=functions.calc<|message|>Computing...<|end|>",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "commentary", Recipient: "functions.calc"}},
HarmonyEventContentEmitted{Content: "Computing..."},
HarmonyEventMessageEnd{},
},
},
{
in: "<|start|>user<|message|><|end|>",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}},
HarmonyEventMessageEnd{},
},
},
{
in: "<|start|>user<|message|>Hello<|end|><|start|>assistant<|message|>Hi!<|end|>",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}},
HarmonyEventContentEmitted{Content: "Hello"},
HarmonyEventMessageEnd{},
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "", Recipient: ""}},
HarmonyEventContentEmitted{Content: "Hi!"},
HarmonyEventMessageEnd{},
},
},
{
in: "<|channel|>analysis<|message|>Thinking about the request<|end|>",
implicitStart: true,
wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}, HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "analysis", Recipient: ""}}, HarmonyEventContentEmitted{Content: "Thinking about the request"}, HarmonyEventMessageEnd{}},
},
}
for i, tt := range tests {
parser := HarmonyParser{
MessageStartTag: "<|start|>",
MessageEndTag: "<|end|>",
HeaderEndTag: "<|message|>",
}
if tt.implicitStart {
parser.AddImplicitStart()
}
gotEvents := parser.AddContent(tt.in)
if !reflect.DeepEqual(gotEvents, tt.wantEvents) {
t.Errorf("case %d: got events %#v, want %#v", i, gotEvents, tt.wantEvents)
}
}
}
func TestHarmonyParserStreaming(t *testing.T) {
type step struct {
input string
wantEvents []HarmonyEvent
}
cases := []struct {
desc string
implicitStart bool
steps []step
}{
{
desc: "simple message streamed character by character",
steps: []step{
{
input: "<",
wantEvents: nil,
},
{
input: "|",
wantEvents: nil,
},
{
input: "start|>u",
wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}},
},
{
input: "ser<|mess",
wantEvents: nil,
},
{
input: "age|>Hi",
wantEvents: []HarmonyEvent{
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}},
HarmonyEventContentEmitted{Content: "Hi"},
},
},
{
input: " there",
wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: " there"}},
},
{
input: "<|e",
wantEvents: nil,
},
{
input: "nd|>",
wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}},
},
},
},
{
desc: "message with channel streamed",
steps: []step{
{
input: "<|start|>assistant",
wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}},
},
{
input: "<|chan",
wantEvents: nil,
},
{
input: "nel|>analysis",
wantEvents: nil,
},
{
input: "<|message|>",
wantEvents: []HarmonyEvent{HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "analysis", Recipient: ""}}},
},
{
input: "Thinking",
wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "Thinking"}},
},
{
input: "...",
wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "..."}},
},
{
input: "<|end|>",
wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}},
},
},
},
{
desc: "message with channel and recipient",
steps: []step{
{
input: "<|start|>assistant<|channel|>commentary to=functions.calc<|message|>",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "commentary", Recipient: "functions.calc"}},
},
},
{
input: "{\"x\": 5}",
wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "{\"x\": 5}"}},
},
{
input: "<|end|>",
wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}},
},
},
},
{
desc: "message with channel and recipient (receipient before channel)",
steps: []step{
{
input: "<|start|>assistant to=functions.calc<|channel|>commentary<|message|>",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "commentary", Recipient: "functions.calc"}},
},
},
{
input: "{\"x\": 5}",
wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "{\"x\": 5}"}},
},
{
input: "<|end|>",
wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}},
},
},
},
{
desc: "implicit start with channel",
implicitStart: true,
steps: []step{
{
input: "<|channel|>thinking",
wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}},
},
{
input: "<|message|>",
wantEvents: []HarmonyEvent{HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "thinking", Recipient: ""}}},
},
{
input: "Processing request",
wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "Processing request"}},
},
{
input: "<|end|>",
wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}},
},
},
},
{
desc: "multiple messages streamed",
steps: []step{
{
input: "<|start|>user<|message|>Hello<|end|>",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}},
HarmonyEventContentEmitted{Content: "Hello"},
HarmonyEventMessageEnd{},
},
},
{
input: "<|start|>",
wantEvents: []HarmonyEvent{HarmonyEventMessageStart{}},
},
{
input: "assistant<|message|>",
wantEvents: []HarmonyEvent{HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "assistant", Channel: "", Recipient: ""}}},
},
{
input: "Hi!",
wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "Hi!"}},
},
{
input: "<|end|>",
wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}},
},
},
},
{
desc: "empty message",
steps: []step{
{
input: "<|start|>system<|message|><|end|>",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "system", Channel: "", Recipient: ""}},
HarmonyEventMessageEnd{},
},
},
},
},
{
desc: "partial tag that looks like end but isn't",
steps: []step{
{
input: "<|start|>user<|message|>test<|e",
wantEvents: []HarmonyEvent{
HarmonyEventMessageStart{},
HarmonyEventHeaderComplete{Header: HarmonyHeader{Role: "user", Channel: "", Recipient: ""}},
HarmonyEventContentEmitted{Content: "test"},
},
},
{
input: "xample|>more",
wantEvents: []HarmonyEvent{HarmonyEventContentEmitted{Content: "<|example|>more"}},
},
{
input: "<|end|>",
wantEvents: []HarmonyEvent{HarmonyEventMessageEnd{}},
},
},
},
}
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
parser := HarmonyParser{
MessageStartTag: "<|start|>",
MessageEndTag: "<|end|>",
HeaderEndTag: "<|message|>",
}
if tc.implicitStart {
parser.AddImplicitStart()
}
for i, step := range tc.steps {
gotEvents := parser.AddContent(step.input)
if !reflect.DeepEqual(gotEvents, step.wantEvents) {
t.Errorf("step %d: input %q: got events %#v, want %#v", i, step.input, gotEvents, step.wantEvents)
}
}
})
}
}


@@ -111,7 +111,8 @@ func (m *Model) Capabilities() []model.Capability {
// Check for thinking capability
openingTag, closingTag := thinking.InferTags(m.Template.Template)
if openingTag != "" && closingTag != "" {
hasTags := openingTag != "" && closingTag != ""
if hasTags || m.Config.ModelFamily == "gptoss" {
capabilities = append(capabilities, model.CapabilityThinking)
}


@@ -19,7 +19,7 @@ type tokenizeFunc func(context.Context, string) ([]int, error)
// chatPrompt accepts a list of messages and returns the prompt and images that should be used for the next chat turn.
// chatPrompt truncates any messages that exceed the context window of the model, making sure to always include 1) the
// latest message and 2) system messages
func chatPrompt(ctx context.Context, m *Model, tokenize tokenizeFunc, opts *api.Options, msgs []api.Message, tools []api.Tool, think *bool) (prompt string, images []llm.ImageData, _ error) {
func chatPrompt(ctx context.Context, m *Model, tokenize tokenizeFunc, opts *api.Options, msgs []api.Message, tools []api.Tool, think *api.ThinkValue) (prompt string, images []llm.ImageData, _ error) {
var system []api.Message
// TODO: Ideally we would compute this from the projector metadata but some pieces are implementation dependent
@@ -42,11 +42,13 @@ func chatPrompt(ctx context.Context, m *Model, tokenize tokenizeFunc, opts *api.
}
thinkVal := false
thinkLevel := ""
if think != nil {
thinkVal = *think
thinkVal = think.AsBool()
thinkLevel = think.AsString()
}
var b bytes.Buffer
if err := m.Template.Execute(&b, template.Values{Messages: append(system, msgs[i:]...), Tools: tools, Think: thinkVal, IsThinkSet: think != nil}); err != nil {
if err := m.Template.Execute(&b, template.Values{Messages: append(system, msgs[i:]...), Tools: tools, Think: thinkVal, ThinkLevel: thinkLevel, IsThinkSet: think != nil}); err != nil {
return "", nil, err
}
@@ -101,10 +103,12 @@ func chatPrompt(ctx context.Context, m *Model, tokenize tokenizeFunc, opts *api.
// truncate any messages that do not fit into the context window
var b bytes.Buffer
thinkVal := false
thinkLevel := ""
if think != nil {
thinkVal = *think
thinkVal = think.AsBool()
thinkLevel = think.AsString()
}
if err := m.Template.Execute(&b, template.Values{Messages: append(system, msgs[currMsgIdx:]...), Tools: tools, Think: thinkVal, IsThinkSet: think != nil}); err != nil {
if err := m.Template.Execute(&b, template.Values{Messages: append(system, msgs[currMsgIdx:]...), Tools: tools, Think: thinkVal, ThinkLevel: thinkLevel, IsThinkSet: think != nil}); err != nil {
return "", nil, err
}


@@ -209,7 +209,7 @@ func TestChatPrompt(t *testing.T) {
model := tt.model
opts := api.Options{Runner: api.Runner{NumCtx: tt.limit}}
think := false
prompt, images, err := chatPrompt(t.Context(), &model, mockRunner{}.Tokenize, &opts, tt.msgs, nil, &think)
prompt, images, err := chatPrompt(t.Context(), &model, mockRunner{}.Tokenize, &opts, tt.msgs, nil, &api.ThinkValue{Value: think})
if tt.error == nil && err != nil {
t.Fatal(err)
} else if tt.error != nil && err != tt.error {


@@ -112,6 +112,11 @@ func (s *Server) scheduleRunner(ctx context.Context, name string, caps []model.C
return nil, nil, nil, err
}
// This model requires a minimum context to function effectively
if slices.Contains(model.Config.ModelFamilies, "gptoss") {
opts.NumCtx = max(opts.NumCtx, 8192)
}
runnerCh, errCh := s.sched.GetRunner(ctx, model, opts, keepAlive)
var runner *runnerRef
select {
@@ -182,11 +187,26 @@ func (s *Server) GenerateHandler(c *gin.Context) {
return
}
useHarmony := shouldUseHarmony(*m) && !req.Raw
var harmonyMessageHandler *HarmonyMessageHandler
var harmonyToolParser *HarmonyToolCallAccumulator
if useHarmony {
harmonyMessageHandler = NewHarmonyMessageHandler()
harmonyMessageHandler.harmonyParser.AddImplicitStart()
harmonyToolParser = harmonyMessageHandler.CreateToolParser()
}
// Validate Think value: string values currently only allowed for gptoss models
if req.Think != nil && req.Think.IsString() && !useHarmony {
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("think value %q is not supported for this model", req.Think.AsString())})
return
}
caps := []model.Capability{model.CapabilityCompletion}
if req.Suffix != "" {
caps = append(caps, model.CapabilityInsert)
}
if req.Think != nil && *req.Think {
if req.Think != nil && req.Think.AsBool() {
caps = append(caps, model.CapabilityThinking)
// TODO(drifkin): consider adding a warning if it's false and the model
// doesn't support thinking. It's not strictly required, but it can be a
@@ -261,7 +281,11 @@ func (s *Server) GenerateHandler(c *gin.Context) {
values.Messages = append(msgs, api.Message{Role: "user", Content: req.Prompt})
}
values.Think = req.Think != nil && *req.Think
values.Think = req.Think != nil && req.Think.AsBool()
values.ThinkLevel = ""
if req.Think != nil {
values.ThinkLevel = req.Think.AsString()
}
values.IsThinkSet = req.Think != nil
var b bytes.Buffer
@@ -284,11 +308,13 @@ func (s *Server) GenerateHandler(c *gin.Context) {
}
var thinkingState *thinking.Parser
openingTag, closingTag := thinking.InferTags(m.Template.Template)
if req.Think != nil && *req.Think && openingTag != "" && closingTag != "" {
thinkingState = &thinking.Parser{
OpeningTag: openingTag,
ClosingTag: closingTag,
if !useHarmony {
openingTag, closingTag := thinking.InferTags(m.Template.Template)
if req.Think != nil && req.Think.AsBool() && openingTag != "" && closingTag != "" {
thinkingState = &thinking.Parser{
OpeningTag: openingTag,
ClosingTag: closingTag,
}
}
}
@@ -316,7 +342,12 @@ func (s *Server) GenerateHandler(c *gin.Context) {
},
}
if thinkingState != nil {
if useHarmony {
content, thinking, toolContent := harmonyMessageHandler.AddContent(cr.Content, harmonyToolParser)
res.Response = content
res.Thinking = thinking
harmonyToolParser.Add(toolContent)
} else if thinkingState != nil {
thinking, content := thinkingState.AddContent(cr.Content)
res.Thinking = thinking
res.Response = content
@@ -327,6 +358,25 @@ func (s *Server) GenerateHandler(c *gin.Context) {
}
if cr.Done {
if useHarmony {
toolName, toolContent := harmonyToolParser.Drain()
if toolName != nil {
*toolName = strings.TrimPrefix(*toolName, "functions.")
var args api.ToolCallFunctionArguments
if err := json.Unmarshal([]byte(toolContent), &args); err != nil {
ch <- gin.H{"error parsing tool call": err.Error()}
return
}
res.ToolCalls = append(res.ToolCalls, api.ToolCall{
Function: api.ToolCallFunction{
Name: *toolName,
Arguments: args,
},
})
}
}
res.DoneReason = cr.DoneReason.String()
res.TotalDuration = time.Since(checkpointStart)
res.LoadDuration = checkpointLoaded.Sub(checkpointStart)
@@ -341,6 +391,15 @@ func (s *Server) GenerateHandler(c *gin.Context) {
}
}
if useHarmony {
// only send messages with meaningful content (empty messages confuse clients)
if res.Response != "" || res.Thinking != "" || res.Done || len(res.ToolCalls) > 0 {
ch <- res
}
return
}
ch <- res
}); err != nil {
ch <- gin.H{"error": err.Error()}
@@ -1471,7 +1530,7 @@ func (s *Server) ChatHandler(c *gin.Context) {
if len(req.Tools) > 0 {
caps = append(caps, model.CapabilityTools)
}
if req.Think != nil && *req.Think {
if req.Think != nil && req.Think.AsBool() {
caps = append(caps, model.CapabilityThinking)
}
@@ -1521,9 +1580,30 @@ func (s *Server) ChatHandler(c *gin.Context) {
return
}
useHarmony := shouldUseHarmony(*m)
// Validate Think value: string values currently only allowed for gptoss models
if req.Think != nil && req.Think.IsString() && !useHarmony {
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("think value %q is not supported for this model", req.Think.AsString())})
return
}
var harmonyMessageHandler *HarmonyMessageHandler
var harmonyToolParser *HarmonyToolCallAccumulator
if useHarmony {
harmonyMessageHandler = NewHarmonyMessageHandler()
var lastMessage *api.Message
if len(msgs) > 0 {
lastMessage = &msgs[len(msgs)-1]
}
harmonyMessageHandler.harmonyParser.AddImplicitStartOrPrefill(lastMessage)
harmonyToolParser = harmonyMessageHandler.CreateToolParser()
}
var thinkingState *thinking.Parser
openingTag, closingTag := thinking.InferTags(m.Template.Template)
if req.Think != nil && *req.Think && openingTag != "" && closingTag != "" {
if req.Think != nil && req.Think.AsBool() && openingTag != "" && closingTag != "" {
thinkingState = &thinking.Parser{
OpeningTag: openingTag,
ClosingTag: closingTag,
@@ -1531,7 +1611,7 @@ func (s *Server) ChatHandler(c *gin.Context) {
}
var toolParser *tools.Parser
if len(req.Tools) > 0 {
if len(req.Tools) > 0 && !useHarmony {
toolParser = tools.NewParser(m.Template.Template, req.Tools)
}
@@ -1557,6 +1637,38 @@ func (s *Server) ChatHandler(c *gin.Context) {
EvalDuration: r.EvalDuration,
},
}
if r.Done {
res.DoneReason = r.DoneReason.String()
res.TotalDuration = time.Since(checkpointStart)
res.LoadDuration = checkpointLoaded.Sub(checkpointStart)
}
if useHarmony {
content, thinking, toolContent := harmonyMessageHandler.AddContent(r.Content, harmonyToolParser)
res.Message.Content = content
res.Message.Thinking = thinking
harmonyToolParser.Add(toolContent)
if r.Done {
toolName, toolContent := harmonyToolParser.Drain()
if toolName != nil {
*toolName = strings.TrimPrefix(*toolName, "functions.")
var args api.ToolCallFunctionArguments
if err := json.Unmarshal([]byte(toolContent), &args); err != nil {
ch <- gin.H{"error parsing tool call": err.Error()}
return
}
res.Message.ToolCalls = []api.ToolCall{{Function: api.ToolCallFunction{Name: *toolName, Arguments: args}}}
}
}
// only send messages with meaningful content (empty messages confuse clients)
if res.Message.Content != "" || res.Message.Thinking != "" || len(res.Message.ToolCalls) > 0 || res.Done {
ch <- res
}
return
}
if thinkingState != nil {
thinkingContent, remainingContent := thinkingState.AddContent(res.Message.Content)
@@ -1568,12 +1680,6 @@ func (s *Server) ChatHandler(c *gin.Context) {
res.Message.Thinking = thinkingContent
}
if r.Done {
res.DoneReason = r.DoneReason.String()
res.TotalDuration = time.Since(checkpointStart)
res.LoadDuration = checkpointLoaded.Sub(checkpointStart)
}
if len(req.Tools) > 0 {
toolCalls, content := toolParser.Add(res.Message.Content)
if len(content) > 0 {


@@ -150,7 +150,7 @@ func TestGenerateChat(t *testing.T) {
Messages: []api.Message{
{Role: "user", Content: "Hello!"},
},
Think: &think,
Think: &api.ThinkValue{Value: think},
})
if w.Code != http.StatusBadRequest {


@@ -0,0 +1,712 @@
package server
// this test file is to test integration of harmony parser into routes.go (as
// opposed to harmonyparser_test.go, which tests the parser in isolation)
import (
"bytes"
"context"
"encoding/json"
"net/http"
"strings"
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/discover"
"github.com/ollama/ollama/fs/ggml"
"github.com/ollama/ollama/llm"
)
func getTestTools() []api.Tool {
return []api.Tool{
{
Type: "function",
Function: api.ToolFunction{
Name: "get_weather",
Description: "Get the current weather in a given location",
Parameters: struct {
Type string `json:"type"`
Defs any `json:"$defs,omitempty"`
Items any `json:"items,omitempty"`
Required []string `json:"required"`
Properties map[string]struct {
Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"`
Enum []any `json:"enum,omitempty"`
} `json:"properties"`
}{
Type: "object",
Required: []string{"location"},
Properties: map[string]struct {
Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"`
Enum []any `json:"enum,omitempty"`
}{
"location": {
Type: api.PropertyType{"string"},
Description: "The city and state, e.g. San Francisco, CA",
},
},
},
},
},
{
Type: "function",
Function: api.ToolFunction{
Name: "calculate",
Description: "Calculate a mathematical expression",
Parameters: struct {
Type string `json:"type"`
Defs any `json:"$defs,omitempty"`
Items any `json:"items,omitempty"`
Required []string `json:"required"`
Properties map[string]struct {
Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"`
Enum []any `json:"enum,omitempty"`
} `json:"properties"`
}{
Type: "object",
Required: []string{"expression"},
Properties: map[string]struct {
Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"`
Enum []any `json:"enum,omitempty"`
}{
"expression": {
Type: api.PropertyType{"string"},
Description: "The mathematical expression to calculate",
},
},
},
},
},
}
}
func createHarmonyTestModel(t *testing.T) (string, string) {
t.Helper()
return createBinFile(t, ggml.KV{
"general.architecture": "gptoss",
"llama.block_count": uint32(1),
"llama.context_length": uint32(8192),
"llama.embedding_length": uint32(4096),
"llama.attention.head_count": uint32(32),
"llama.attention.head_count_kv": uint32(8),
"tokenizer.ggml.tokens": []string{""},
"tokenizer.ggml.scores": []float32{0},
"tokenizer.ggml.token_type": []int32{0},
}, []*ggml.Tensor{
{Name: "token_embd.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_down.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_gate.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_up.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.ffn_norm.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_k.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_q.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "blk.0.attn_v.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
{Name: "output.weight", Shape: []uint64{1}, WriterTo: bytes.NewReader(make([]byte, 4))},
})
}
// TestChatHarmonyParserStreamingRealtime verifies that chunks are emitted as soon as they're available
func TestChatHarmonyParserStreamingRealtime(t *testing.T) {
gin.SetMode(gin.TestMode)
type step struct {
input llm.CompletionResponse
wantContent string
wantThinking string
wantToolCalls []api.ToolCall
}
testCases := []struct {
name string
steps []step
only bool
}{
{
name: "content streams as it arrives",
steps: []step{
{
input: llm.CompletionResponse{Content: "<|message|>Hello", Done: false},
wantContent: "Hello",
},
{
input: llm.CompletionResponse{Content: ", world", Done: false},
wantContent: ", world",
},
{
input: llm.CompletionResponse{Content: "!<|end|>", Done: true, DoneReason: llm.DoneReasonStop},
wantContent: "!",
},
},
},
{
name: "thinking streams separately from content",
steps: []step{
{
input: llm.CompletionResponse{Content: "<|channel|>analysis<|message|>Thinking...", Done: false},
wantThinking: "Thinking...",
},
{
input: llm.CompletionResponse{Content: "<|end|>", Done: false},
// No output expected - just closes the analysis message and resets state to normal
},
{
input: llm.CompletionResponse{Content: "<|start|>assistant<|message|>Answer", Done: false},
wantContent: "Answer", // After message end, state is reset to normal
},
{
input: llm.CompletionResponse{Content: "<|end|>", Done: true, DoneReason: llm.DoneReasonStop},
// No output expected - just closes the assistant message
},
},
},
{
name: "partial tags buffer until complete",
steps: []step{
{
input: llm.CompletionResponse{Content: "<|chan", Done: false},
// No output - partial tag
},
{
input: llm.CompletionResponse{Content: "nel|>analysis<|mess", Done: false},
// No output - still building tags
},
{
input: llm.CompletionResponse{Content: "age|>Deep ", Done: false},
wantThinking: "Deep ",
},
{
input: llm.CompletionResponse{Content: "thought<|end|>", Done: false},
wantThinking: "thought",
},
{
input: llm.CompletionResponse{Content: "<|start|>assistant<|message|>Done<|end|>", Done: true, DoneReason: llm.DoneReasonStop},
wantContent: "Done", // After message end, state is reset to normal
},
},
},
{
name: "simple assistant after analysis",
steps: []step{
{
input: llm.CompletionResponse{Content: "<|channel|>analysis<|message|>Think<|end|><|start|>assistant<|message|>Answer<|end|>", Done: true, DoneReason: llm.DoneReasonStop},
wantContent: "Answer",
wantThinking: "Think",
},
},
},
{
name: "tool call parsed and returned correctly",
steps: []step{
{
input: llm.CompletionResponse{Content: "<|channel|>commentary to=functions.get_weather<|message|>{\"location\":\"San Francisco\"}<|end|><|start|>assistant<|message|>The weather is sunny<|end|>", Done: true, DoneReason: llm.DoneReasonStop},
wantContent: "The weather is sunny",
wantToolCalls: []api.ToolCall{
{
Function: api.ToolCallFunction{
Name: "get_weather",
Arguments: api.ToolCallFunctionArguments{
"location": "San Francisco",
},
},
},
},
},
},
},
{
name: "tool call with streaming JSON across chunks",
steps: []step{
{
input: llm.CompletionResponse{Content: "<|channel|>commentary to=functions.calculate<|message|>{\"expr", Done: false},
// No output yet - incomplete JSON
},
{
input: llm.CompletionResponse{Content: "ession\":\"2+", Done: false},
// Still no output - incomplete JSON
},
{
input: llm.CompletionResponse{Content: "2\"}", Done: true},
wantToolCalls: []api.ToolCall{
{
Function: api.ToolCallFunction{
Name: "calculate",
Arguments: api.ToolCallFunctionArguments{
"expression": "2+2",
},
},
},
},
},
},
},
}
anyOnlies := false
for _, tc := range testCases {
if tc.only {
anyOnlies = true
}
}
for _, tc := range testCases {
if anyOnlies && !tc.only {
continue
}
t.Run(tc.name, func(t *testing.T) {
var chunks []api.ChatResponse
chunkIdx := 0
mockResponses := make([]llm.CompletionResponse, len(tc.steps))
for i, step := range tc.steps {
mockResponses[i] = step.input
}
mock := mockRunner{
CompletionFn: func(ctx context.Context, r llm.CompletionRequest, fn func(llm.CompletionResponse)) error {
for _, resp := range mockResponses {
fn(resp)
// Give the handler time to process each response
time.Sleep(30 * time.Millisecond)
}
return nil
},
}
s := Server{
sched: &Scheduler{
pendingReqCh: make(chan *LlmRequest, 1),
finishedReqCh: make(chan *LlmRequest, 1),
expiredCh: make(chan *runnerRef, 1),
unloadedCh: make(chan any, 1),
loaded: make(map[string]*runnerRef),
newServerFn: newMockServer(&mock),
getGpuFn: discover.GetGPUInfo,
getCpuFn: discover.GetCPUInfo,
reschedDelay: 100 * time.Millisecond,
loadFn: func(req *LlmRequest, _ *ggml.GGML, _ discover.GpuInfoList, _ int) {
req.successCh <- &runnerRef{
llama: &mock,
}
},
},
}
go s.sched.Run(t.Context())
// Create a simple test model
_, digest := createHarmonyTestModel(t)
streamFalse := false
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "harmony-test-streaming",
Files: map[string]string{"test.gguf": digest},
Template: `<|start|><|end|>{{ with .Tools }}{{ end }}{{ .Prompt }}`,
Stream: &streamFalse,
})
if w.Code != 200 {
t.Fatalf("failed to create model: %d", w.Code)
}
// Test chat endpoint with streaming
streamTrue := true
w = createRequest(t, s.ChatHandler, api.ChatRequest{
Model: "harmony-test-streaming",
Messages: []api.Message{{Role: "user", Content: "Hello"}},
Stream: &streamTrue,
Tools: getTestTools(),
})
if w.Code != 200 {
t.Fatalf("chat request failed: %d - %s", w.Code, w.Body.String())
}
// Parse all chunks
decoder := json.NewDecoder(w.Body)
for decoder.More() {
var chunk api.ChatResponse
if err := decoder.Decode(&chunk); err != nil {
t.Fatalf("failed to decode chunk: %v", err)
}
if chunk.Message.Content != "" || chunk.Message.Thinking != "" || len(chunk.Message.ToolCalls) > 0 {
chunks = append(chunks, chunk)
}
}
// Log received chunks for debugging
if t.Failed() || len(chunks) == 0 {
t.Logf("Received %d chunks:", len(chunks))
for i, chunk := range chunks {
t.Logf(" Chunk %d: content=%q thinking=%q", i, chunk.Message.Content, chunk.Message.Thinking)
}
}
// Verify chunks match expected steps
for i, step := range tc.steps {
// Skip steps that don't expect any output
if step.wantContent == "" && step.wantThinking == "" && len(step.wantToolCalls) == 0 {
continue
}
if chunkIdx >= len(chunks) {
t.Errorf("step %d: expected chunk not received (wanted content=%q thinking=%q)",
i, step.wantContent, step.wantThinking)
continue
}
chunk := chunks[chunkIdx]
if chunk.Message.Content != step.wantContent || chunk.Message.Thinking != step.wantThinking {
t.Errorf("step %d: chunk mismatch: got (content=%q, thinking=%q), want (content=%q, thinking=%q)",
i, chunk.Message.Content, chunk.Message.Thinking, step.wantContent, step.wantThinking)
}
// Check tool calls if expected
if len(step.wantToolCalls) > 0 {
if len(chunk.Message.ToolCalls) != len(step.wantToolCalls) {
t.Errorf("step %d: tool calls count mismatch: got %d, want %d",
i, len(chunk.Message.ToolCalls), len(step.wantToolCalls))
} else {
for j, wantCall := range step.wantToolCalls {
if j >= len(chunk.Message.ToolCalls) {
break
}
gotCall := chunk.Message.ToolCalls[j]
if gotCall.Function.Name != wantCall.Function.Name {
t.Errorf("step %d, tool call %d: name mismatch: got %q, want %q",
i, j, gotCall.Function.Name, wantCall.Function.Name)
}
// Compare arguments as JSON strings for simplicity
gotArgs, _ := json.Marshal(gotCall.Function.Arguments)
wantArgs, _ := json.Marshal(wantCall.Function.Arguments)
if string(gotArgs) != string(wantArgs) {
t.Errorf("step %d, tool call %d: arguments mismatch: got %s, want %s",
i, j, string(gotArgs), string(wantArgs))
}
}
}
}
chunkIdx++
}
// Check if we have extra chunks
if chunkIdx < len(chunks) {
t.Errorf("received %d extra chunks", len(chunks)-chunkIdx)
for i := chunkIdx; i < len(chunks); i++ {
t.Logf(" extra chunk %d: content=%q thinking=%q",
i-chunkIdx, chunks[i].Message.Content, chunks[i].Message.Thinking)
}
}
})
}
}
// TestChatHarmonyParserStreamingSimple is a simpler test that just verifies basic streaming
func TestChatHarmonyParserStreamingSimple(t *testing.T) {
gin.SetMode(gin.TestMode)
mockResponses := []llm.CompletionResponse{
{Content: "<|message|>First ", Done: false},
{Content: "chunk ", Done: false},
{Content: "here<|end|>", Done: true, DoneReason: llm.DoneReasonStop},
}
mock := mockRunner{
CompletionFn: func(ctx context.Context, r llm.CompletionRequest, fn func(llm.CompletionResponse)) error {
t.Logf("Mock received prompt: %q", r.Prompt)
t.Logf("Mock sending %d responses", len(mockResponses))
for i, resp := range mockResponses {
t.Logf("Sending response %d: %q", i, resp.Content)
fn(resp)
}
return nil
},
}
s := Server{
sched: &Scheduler{
pendingReqCh: make(chan *LlmRequest, 1),
finishedReqCh: make(chan *LlmRequest, 1),
expiredCh: make(chan *runnerRef, 1),
unloadedCh: make(chan any, 1),
loaded: make(map[string]*runnerRef),
newServerFn: newMockServer(&mock),
getGpuFn: discover.GetGPUInfo,
getCpuFn: discover.GetCPUInfo,
reschedDelay: 100 * time.Millisecond,
loadFn: func(req *LlmRequest, _ *ggml.GGML, _ discover.GpuInfoList, _ int) {
req.successCh <- &runnerRef{
llama: &mock,
}
},
},
}
go s.sched.Run(t.Context())
// Create model
_, digest := createHarmonyTestModel(t)
streamFalse := false
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "gpt-oss",
Files: map[string]string{"test.gguf": digest},
Template: `<|start|><|end|>{{ .Tools }}{{ .Prompt }}`,
Stream: &streamFalse,
})
if w.Code != 200 {
t.Fatalf("failed to create model: %d", w.Code)
}
// Test streaming
streamTrue := true
w = createRequest(t, s.ChatHandler, api.ChatRequest{
Model: "gpt-oss",
Messages: []api.Message{{Role: "user", Content: "Hello"}},
Stream: &streamTrue,
Tools: getTestTools(),
})
if w.Code != 200 {
t.Fatalf("chat request failed: %d - %s", w.Code, w.Body.String())
}
// Parse chunks
var chunks []api.ChatResponse
decoder := json.NewDecoder(w.Body)
for decoder.More() {
var chunk api.ChatResponse
if err := decoder.Decode(&chunk); err != nil {
t.Fatalf("failed to decode chunk: %v", err)
}
chunks = append(chunks, chunk)
t.Logf("Received chunk %d: content=%q thinking=%q done=%v",
len(chunks), chunk.Message.Content, chunk.Message.Thinking, chunk.Done)
}
// Verify we got chunks
if len(chunks) == 0 {
t.Fatal("expected streaming chunks, got none")
}
// Verify content
var content strings.Builder
for _, chunk := range chunks {
content.WriteString(chunk.Message.Content)
}
expectedContent := "First chunk here"
if content.String() != expectedContent {
t.Errorf("content mismatch: got %q, want %q", content.String(), expectedContent)
}
// Verify we got multiple chunks (streaming)
contentChunks := 0
for _, chunk := range chunks {
if chunk.Message.Content != "" {
contentChunks++
}
}
if contentChunks < 2 {
t.Errorf("expected at least 2 content chunks for streaming, got %d", contentChunks)
}
}
func TestChatHarmonyParserStreaming(t *testing.T) {
gin.SetMode(gin.TestMode)
type expectedChunk struct {
afterResponse int // Which mock response this chunk should appear after
content string // Expected content in this chunk
thinking string // Expected thinking in this chunk
}
testCases := []struct {
name string
mockResponses []llm.CompletionResponse
expectedChunks []expectedChunk
wantContent string
wantThinking string
}{
{
name: "simple message without thinking",
mockResponses: []llm.CompletionResponse{
{Content: "<|start|>assistant<|message|>Hello, ", Done: false},
{Content: "how can I help?", Done: false},
{Content: "<|end|>", Done: true, DoneReason: llm.DoneReasonStop},
},
expectedChunks: []expectedChunk{
{afterResponse: 1, content: "Hello, "},
{afterResponse: 2, content: "how can I help?"},
},
wantContent: "Hello, how can I help?",
},
{
name: "message with analysis channel for thinking",
mockResponses: []llm.CompletionResponse{
{Content: "<|channel|>analysis<|message|>", Done: false},
{Content: "Let me think ", Done: false},
{Content: "about this problem...", Done: false},
{Content: "<|end|>", Done: false},
{Content: "<|start|>assistant<|message|>", Done: false},
{Content: "The answer ", Done: false},
{Content: "is 42", Done: false},
{Content: "<|end|>", Done: true, DoneReason: llm.DoneReasonStop},
},
expectedChunks: []expectedChunk{
{afterResponse: 2, thinking: "Let me think "},
{afterResponse: 3, thinking: "about this problem..."},
{afterResponse: 6, content: "The answer "},
{afterResponse: 7, content: "is 42"},
},
wantContent: "The answer is 42",
wantThinking: "Let me think about this problem...",
},
{
name: "streaming with partial tags across boundaries",
mockResponses: []llm.CompletionResponse{
{Content: "<|chan", Done: false},
{Content: "nel|>analy", Done: false},
{Content: "sis<|mess", Done: false},
{Content: "age|>Think", Done: false},
{Content: "ing deeply...<|end|>", Done: false},
{Content: "<|start|>assi", Done: false},
{Content: "stant<|message|>Result ", Done: false},
{Content: "computed<|e", Done: false},
{Content: "nd|>", Done: true, DoneReason: llm.DoneReasonStop},
},
expectedChunks: []expectedChunk{
{afterResponse: 4, thinking: "Think"},
{afterResponse: 5, thinking: "ing deeply..."},
{afterResponse: 7, content: "Result "},
{afterResponse: 8, content: "computed"},
},
wantContent: "Result computed",
wantThinking: "Thinking deeply...",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Channel to synchronize mock responses with chunk verification
responsesSent := make(chan int, len(tc.mockResponses))
mock := mockRunner{
CompletionFn: func(ctx context.Context, r llm.CompletionRequest, fn func(llm.CompletionResponse)) error {
// Send mock responses one at a time, notifying when each is sent
for i, resp := range tc.mockResponses {
fn(resp)
responsesSent <- i + 1
}
close(responsesSent)
return nil
},
}
s := Server{
sched: &Scheduler{
pendingReqCh: make(chan *LlmRequest, 1),
finishedReqCh: make(chan *LlmRequest, 1),
expiredCh: make(chan *runnerRef, 1),
unloadedCh: make(chan any, 1),
loaded: make(map[string]*runnerRef),
newServerFn: newMockServer(&mock),
getGpuFn: discover.GetGPUInfo,
getCpuFn: discover.GetCPUInfo,
reschedDelay: 250 * time.Millisecond,
loadFn: func(req *LlmRequest, _ *ggml.GGML, _ discover.GpuInfoList, _ int) {
req.successCh <- &runnerRef{
llama: &mock,
}
},
},
}
go s.sched.Run(t.Context())
// Create a minimal model
_, digest := createHarmonyTestModel(t)
// Create model with passthrough template
stream := false
w := createRequest(t, s.CreateHandler, api.CreateRequest{
Model: "harmony-test",
Files: map[string]string{"file.gguf": digest},
Template: `<|start|><|end|>{{ with .Tools }}{{ end }}{{ .Prompt }}`,
Stream: &stream,
})
if w.Code != http.StatusOK {
t.Fatalf("failed to create model: %d", w.Code)
}
// Test chat endpoint with streaming
streamTrue := true
w = createRequest(t, s.ChatHandler, api.ChatRequest{
Model: "harmony-test",
Messages: []api.Message{{Role: "user", Content: "Hello"}},
Stream: &streamTrue,
Tools: getTestTools(),
})
if w.Code != http.StatusOK {
t.Fatalf("chat request failed: %d - %s", w.Code, w.Body.String())
}
// Parse streaming response
var chunks []api.ChatResponse
var content, thinking strings.Builder
decoder := json.NewDecoder(w.Body)
for decoder.More() {
var chunk api.ChatResponse
if err := decoder.Decode(&chunk); err != nil {
t.Fatalf("failed to decode chunk: %v", err)
}
chunks = append(chunks, chunk)
// Accumulate content and thinking from each chunk
content.WriteString(chunk.Message.Content)
thinking.WriteString(chunk.Message.Thinking)
// Debug output
t.Logf("Chunk %d: content=%q thinking=%q done=%v", len(chunks), chunk.Message.Content, chunk.Message.Thinking, chunk.Done)
}
// Verify we got streaming chunks
if len(chunks) == 0 {
t.Fatal("expected streaming chunks, got none")
}
gotContent := content.String()
gotThinking := thinking.String()
if gotContent != tc.wantContent {
t.Errorf("content mismatch: got %q, want %q", gotContent, tc.wantContent)
}
if gotThinking != tc.wantThinking {
t.Errorf("thinking mismatch: got %q, want %q", gotThinking, tc.wantThinking)
}
// Verify last chunk has done=true
lastChunk := chunks[len(chunks)-1]
if !lastChunk.Done {
t.Error("expected last chunk to have done=true")
}
})
}
}