diff --git a/home/user/.config/pva/conf.d/13-ollama.conf b/home/user/.config/pva/conf.d/13-ollama.conf index 1120c8d..735e743 100644 --- a/home/user/.config/pva/conf.d/13-ollama.conf +++ b/home/user/.config/pva/conf.d/13-ollama.conf @@ -12,8 +12,12 @@ text:"en","AIIDENTIFYIMAGE","What do you see on this image?" ai:"keyword","question" ai:"enable","false" -ai:"port","11435" +# We now default to Ollama's normal port, not Alpaca's anymore. +ai:"port","11434" ai:"host","localhost" + +# You have to have this model installed, or it does not work out of the box!!! + ai:"model","llava:latest" ai:"mode","gapfiller" ai:"device","/dev/video1" @@ -25,6 +29,15 @@ ai:"quality","70" ai:"languageprompt","antworte in deutsch." ai:"aliases","freisprechen=freetalk,schlüsselwort=keyword,lückenfüller=gapfiller,schlüsselwort modus=keyword,lückenfüller modus=gapfiller" +# A note: Yes, we could read out Ollama's list of installed models, but it won't work: the STT system will translate "llava:latest" to "laber doppelpunkt later ist" if you are lucky, so we need aliases that are recognizable. +# If you enter models you do not have installed or running and swap to them, "AI" stops working, but you can always switch back. +# Example: +# ai:"models","normal=llava-llama3:latest,mond=moondream:1.8b-v2-fp16,lava=llava:latest" + +ai:"models","lava=llava:latest" + +command:"sprachmodell wechseln zu .*","AISWAPMODEL","","" +command:"künstliche intelligenz wechsel zu .*","AISWAP","","" command:"was ist auf dem bild zu sehen","AIIDENTIFYIMAGE","","" command:"was ist auf den bildern zu sehen","AIIDENTIFYIMAGE","","" command:"was halte ich in die kamera","AIIDENTIFYCAM","",""