Event JSON
{
"id": "1e2caf4cdd999291342e57ad4a721fa77cc26a969b958cff34d51140ce863228",
"pubkey": "41727036bf8b17d496880125a9ed349c351b8b424384c7314fdfdb2a538b358d",
"created_at": 1744283068,
"kind": 1,
"tags": [
[
"t",
"selfhosted"
],
[
"t",
"ai"
],
[
"t",
"ollama"
],
[
"proxy",
"https://fosstodon.org/users/jamesravey/statuses/114313335154313948",
"activitypub"
],
[
"client",
"Mostr",
"31990:6be38f8c63df7dbf84db7ec4a6e6fbbd8d19dca3b980efad18585c46f04b26f9:mostr",
"wss://relay.mostr.pub"
]
],
"content": "You can now use Ollama model with Continue.Dev to do “agentic” code generation in VSCode. However if you use LiteLLM to manage your model access, you won’t be able to take advantage of this feature just yet https://github.com/continuedev/continue/issues/5044\n\n#selfhosted #ai #ollama (https://brainsteam.co.uk/notes/2025/04/10/1744283027/)",
"sig": "1eebd8800679976b4389ef89bd79bf1b1d3ab8e7a978390772c873d2f913557a604250019a1a2149038ae08a751577279f83129bd567635f407a7c8341997f2b"
}
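
For context, here is a minimal sketch of the setup the note describes, written against Continue.Dev's config.json format. The model name, titles, ports, and apiBase values below are illustrative assumptions, not values taken from the note. Pointing Continue directly at a local Ollama instance looks roughly like this:

{
  "models": [
    {
      "title": "Ollama (local)",
      "provider": "ollama",
      "model": "qwen2.5-coder:7b",
      "apiBase": "http://localhost:11434"
    }
  ]
}

The route the note says does not work yet is the same model served through a LiteLLM proxy, reached via Continue's OpenAI-compatible provider; per the linked issue (continuedev/continue#5044), agent mode was not available through this path at the time of the note. A hedged sketch of that configuration, again with assumed names, port, and a placeholder key:

{
  "models": [
    {
      "title": "Ollama via LiteLLM proxy",
      "provider": "openai",
      "model": "ollama/qwen2.5-coder",
      "apiBase": "http://localhost:4000/v1",
      "apiKey": "sk-anything"
    }
  ]
}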