Merge branch 'ollama:main' into main
@@ -351,8 +351,11 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [ARGO](https://github.com/xark-argo/argo) (Locally download and run Ollama and Huggingface models with RAG on Mac/Windows/Linux)
 - [G1](https://github.com/bklieger-groq/g1) (Prototype of using prompting strategies to improve the LLM's reasoning through o1-like reasoning chains.)
 - [Ollama App](https://github.com/JHubi1/ollama-app) (Modern and easy-to-use multi-platform client for Ollama)
 - [Perfect Memory AI](https://www.perfectmemory.ai/) (Productivity AI assistant personalized by what you have seen on your screen, heard and said in meetings)
 - [Hexabot](https://github.com/hexastack/hexabot) (A conversational AI builder)
-- [Reddit Rate]((https://github.com/rapidarchitect/reddit_analyzer)) (Search and rate Reddit topics with a weighted summation)
+- [Reddit Rate](https://github.com/rapidarchitect/reddit_analyzer) (Search and rate Reddit topics with a weighted summation)
+- [VT](https://github.com/vinhnx/vt.ai) (A minimal multimodal AI chat app with dynamic conversation routing. Supports local models via Ollama)
+- [Witsy](https://github.com/nbonamy/witsy) (An AI desktop application available for Mac/Windows/Linux)

 ### Terminal
@@ -437,6 +440,8 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Agents-Flex for Java](https://github.com/agents-flex/agents-flex) with [example](https://github.com/agents-flex/agents-flex/tree/main/agents-flex-llm/agents-flex-llm-ollama/src/test/java/com/agentsflex/llm/ollama)
 - [Ollama for Swift](https://github.com/mattt/ollama-swift)
 - [GoLamify](https://github.com/prasad89/golamify)
+- [Ollama for Haskell](https://github.com/tusharad/ollama-haskell)
+- [multi-llm-ts](https://github.com/nbonamy/multi-llm-ts) (A TypeScript/JavaScript library providing access to different LLMs through a unified API)

 ### Mobile
@@ -480,6 +485,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [QodeAssist](https://github.com/Palm1r/QodeAssist) (AI-powered coding assistant plugin for Qt Creator)
 - [Obsidian Quiz Generator plugin](https://github.com/ECuiDev/obsidian-quiz-generator)
 - [TextCraft](https://github.com/suncloudsmoon/TextCraft) (Copilot in Word alternative using Ollama)
+- [Alfred Ollama](https://github.com/zeitlings/alfred-ollama) (Alfred Workflow)

 ### Supported backends
@@ -361,7 +361,7 @@ func (t *winTray) showMenu() error {
|
||||
|
||||
boolRet, _, err = pTrackPopupMenu.Call(
|
||||
uintptr(t.menus[0]),
|
||||
TPM_BOTTOMALIGN|TPM_LEFTALIGN,
|
||||
TPM_BOTTOMALIGN|TPM_LEFTALIGN|TPM_RIGHTBUTTON,
|
||||
uintptr(p.X),
|
||||
uintptr(p.Y),
|
||||
0,
|
||||
|
||||
@@ -67,6 +67,7 @@ const (
 	SW_HIDE         = 0
 	TPM_BOTTOMALIGN = 0x0020
 	TPM_LEFTALIGN   = 0x0000
+	TPM_RIGHTBUTTON = 0x0002
 	WM_CLOSE        = 0x0010
 	WM_USER         = 0x0400
 	WS_CAPTION      = 0x00C00000
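
The two hunks above are one change: `TrackPopupMenu` takes its `uFlags` argument as a bitmask, and per the Win32 documentation `TPM_RIGHTBUTTON` lets the user select menu items with the right mouse button as well as the left one, presumably so the right-click that opens a tray menu can also pick from it. A minimal, self-contained sketch of how the flag values combine (the constant values mirror the `const` block above; the real call goes through `pTrackPopupMenu.Call` as in the first hunk):

```go
package main

import "fmt"

// Win32 TrackPopupMenu flags, mirroring the const block in the diff above.
const (
	TPM_LEFTALIGN   = 0x0000 // align the menu's left edge with the given x
	TPM_BOTTOMALIGN = 0x0020 // align the menu's bottom edge with the given y
	TPM_RIGHTBUTTON = 0x0002 // allow item selection with the right button too
)

func main() {
	// The flags OR together into the single uFlags argument that showMenu
	// passes to TrackPopupMenu.
	uFlags := TPM_BOTTOMALIGN | TPM_LEFTALIGN | TPM_RIGHTBUTTON
	fmt.Printf("uFlags = %#06x\n", uFlags) // uFlags = 0x0022
}
```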
@@ -112,6 +112,21 @@ sudo systemctl status ollama
 > https://www.amd.com/en/support/linux-drivers for best support of your Radeon
 > GPU.

+## Customizing
+
+To customize the installation of Ollama, edit the systemd service file or set environment variables by running:
+
+```
+sudo systemctl edit ollama
+```
+
+Alternatively, create an override file manually in `/etc/systemd/system/ollama.service.d/override.conf`:
+
+```ini
+[Service]
+Environment="OLLAMA_DEBUG=1"
+```
+
 ## Updating

 Update Ollama by running the install script again:
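
A note on the manual route in the added docs: `systemctl edit` reloads the unit definition for you when the editor closes, but an `override.conf` created by hand only takes effect after `sudo systemctl daemon-reload` followed by `sudo systemctl restart ollama`.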
@@ -1092,9 +1092,11 @@ func (s *llmServer) EstimatedTotal() uint64 {
 func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
 	for i, gpu := range s.gpus {
 		if gpu.ID == gpuID {
-			return s.estimate.GPUSizes[i]
+			if i < len(s.estimate.GPUSizes) {
+				return s.estimate.GPUSizes[i]
+			}
 		}
 	}
 	return 0
 }
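
The added bounds check guards against `s.gpus` and `s.estimate.GPUSizes` disagreeing in length, which previously let a matching GPU index run past the end of the estimate slice and panic. A stripped-down, runnable sketch of the same logic (the `server` and `gpuInfo` types here are hypothetical stand-ins for the real scheduler types):

```go
package main

import "fmt"

// Hypothetical stand-ins for the llm package types, reduced to the
// fields the lookup needs.
type gpuInfo struct{ ID string }

type server struct {
	gpus     []gpuInfo
	gpuSizes []uint64 // plays the role of s.estimate.GPUSizes
}

// estimatedVRAMByGPU mirrors the patched logic: the i < len(...) guard keeps
// a shorter (or stale) estimate slice from causing an index-out-of-range panic.
func (s *server) estimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			if i < len(s.gpuSizes) {
				return s.gpuSizes[i]
			}
		}
	}
	return 0
}

func main() {
	s := &server{
		gpus:     []gpuInfo{{ID: "gpu-0"}, {ID: "gpu-1"}},
		gpuSizes: []uint64{8 << 30}, // the estimate only covers the first GPU
	}
	// Before the fix this lookup would panic; now it reports 0 bytes.
	fmt.Println(s.estimatedVRAMByGPU("gpu-1"))
}
```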
@@ -4,9 +4,12 @@

 set -eu

+red="$( (/usr/bin/tput bold; /usr/bin/tput setaf 1; :) 2>&-)"
+plain="$( (/usr/bin/tput sgr0; :) 2>&-)"
+
 status() { echo ">>> $*" >&2; }
-error() { echo "ERROR $*"; exit 1; }
-warning() { echo "WARNING: $*"; }
+error() { echo "${red}ERROR:${plain} $*"; exit 1; }
+warning() { echo "${red}WARNING:${plain} $*"; }

 TEMP_DIR=$(mktemp -d)
 cleanup() { rm -rf $TEMP_DIR; }
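
The `$( (... ; :) 2>&-)` idiom in the new color setup is worth a word: the subshell's stderr is closed so any `tput` complaints (no TTY, a terminal without color support) stay silent, and the trailing `:` forces a zero exit status so the assignment does not trip `set -eu`. In that case `red` and `plain` are simply empty strings and the error and warning messages print unstyled.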
@@ -162,6 +165,12 @@ EOF
         start_service() { $SUDO systemctl restart ollama; }
         trap start_service EXIT
         ;;
+    *)
+        warning "systemd is not running"
+        if [ "$IS_WSL2" = true ]; then
+            warning "see https://learn.microsoft.com/en-us/windows/wsl/systemd#how-to-enable-systemd to enable it"
+        fi
+        ;;
 esac
 }
@@ -32,7 +32,7 @@ func TestChatPrompt(t *testing.T) {
 	mllamaModel := Model{Template: tmpl, ProjectorPaths: []string{"vision"}, Config: ConfigV2{ModelFamilies: []string{"mllama"}}}

 	createImg := func(width, height int) ([]byte, error) {
-		img := image.NewRGBA(image.Rect(0, 0, 5, 5))
+		img := image.NewRGBA(image.Rect(0, 0, width, height))
 		var buf bytes.Buffer

 		if err := png.Encode(&buf, img); err != nil {
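
With the hard-coded `image.Rect(0, 0, 5, 5)`, the helper ignored its `width` and `height` arguments and every test image came out 5x5, so size-dependent paths in the vision prompt code were never exercised. A runnable extraction of the fixed helper (standalone here for illustration; in the test it remains a closure inside `TestChatPrompt`):

```go
package main

import (
	"bytes"
	"fmt"
	"image"
	"image/png"
)

// createImg mirrors the patched test helper: it encodes a PNG of the
// requested dimensions instead of a fixed 5x5 image, so callers can
// generate test inputs with different sizes and aspect ratios.
func createImg(width, height int) ([]byte, error) {
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	var buf bytes.Buffer
	if err := png.Encode(&buf, img); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	for _, d := range [][2]int{{5, 5}, {560, 560}, {1024, 768}} {
		b, err := createImg(d[0], d[1])
		if err != nil {
			panic(err)
		}
		fmt.Printf("%dx%d -> %d bytes of PNG\n", d[0], d[1], len(b))
	}
}
```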