Event JSON
{
  "id": "3e41042653bb36df0c1798c2adaee705f10e3dfbfdfdab02cbd5d97a7a1ac7ea",
  "pubkey": "3277afbb2cd5e48f037d6d23b6eb6eb037ee535d51c96b37200046c65bced486",
  "created_at": 1760449161,
  "kind": 1,
  "tags": [
    ["proxy", "https://23.social/@thomasfricke/115372796232969081", "web"],
    ["t", "ai"],
    ["t", "llm"],
    ["t", "security"],
    ["t", "backdoor"],
    ["proxy", "https://23.social/users/thomasfricke/statuses/115372796232969081", "activitypub"],
    ["L", "pink.momostr"],
    ["l", "pink.momostr.activitypub:https://23.social/users/thomasfricke/statuses/115372796232969081", "pink.momostr"],
    ["-"]
  ],
  "content": "A small number of samples can poison #LLM s of any size \\ Anthropic\nhttps://www.anthropic.com/research/small-samples-poison\n\n\"... with the UK #AI #Security Institute and the Alan Turing Institute, we found that as few as 250 malicious documents can produce a \"#backdoor\" vulnerability in a large language model—regardless of model size or training data volume. ... 13B parameter model is trained on over 20 times more training data than a 600M model, both can be backdoored by the same small number of poisoned documents\"",
  "sig": "569362e1a0648211512200a6e234c03497aa11eb50e9cba7d9641034bcf126f9e29ac370c0d235e2aacf4da79669f282d89373265fa5ce7d9f923fc51a798d70"
}
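
For reference, this is a Nostr event: per NIP-01, "id" is the SHA-256 hash of the canonical UTF-8 serialization [0, pubkey, created_at, kind, tags, content] with no extra whitespace, and "sig" is a BIP-340 Schnorr signature over that id by the key in "pubkey". Below is a minimal Python sketch to recheck the id; the event.json filename is illustrative, and while Python's compact json.dumps matches NIP-01's escaping rules for content like this, exotic control characters could serialize differently.

import hashlib
import json

def nostr_event_id(event: dict) -> str:
    # NIP-01: serialize [0, pubkey, created_at, kind, tags, content]
    # as compact JSON (no whitespace, non-ASCII left unescaped) ...
    payload = json.dumps(
        [0, event["pubkey"], event["created_at"], event["kind"],
         event["tags"], event["content"]],
        separators=(",", ":"),
        ensure_ascii=False,
    )
    # ... and take the SHA-256 of the UTF-8 bytes, hex-encoded.
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()

# Assumes the JSON above was saved to event.json (illustrative path).
with open("event.json") as f:
    event = json.load(f)

assert nostr_event_id(event) == event["id"]

Verifying "sig" additionally requires BIP-340 Schnorr verification of the id against the x-only "pubkey", which needs a secp256k1 library and is not shown here.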