Enum GeminiSafetyHarmCategory
Namespace: Uralstech.UGemini.Models.Generation.Safety
The category of a rating.
public enum GeminiSafetyHarmCategory
Fields
[EnumMember(Value = "HARM_CATEGORY_CIVIC_INTEGRITY")] CivicIntegrity = 11
Content that may be used to harm civic integrity.
[EnumMember(Value = "HARM_CATEGORY_DANGEROUS")] Dangerous = 6
Dangerous content that promotes, facilitates, or encourages harmful acts.
[EnumMember(Value = "HARM_CATEGORY_DANGEROUS_CONTENT")] DangerousContent = 10
Dangerous content.
[EnumMember(Value = "HARM_CATEGORY_DEROGATORY")] Derogatory = 1
Negative or harmful comments targeting identity and/or protected attributes.
[EnumMember(Value = "HARM_CATEGORY_HARASSMENT")] Harassment = 7
Harassment content.
[EnumMember(Value = "HARM_CATEGORY_HATE_SPEECH")] HateSpeech = 8
Hate speech and hateful content.
[EnumMember(Value = "HARM_CATEGORY_MEDICAL")] Medical = 5
Promotes unchecked medical advice.
[EnumMember(Value = "HARM_CATEGORY_SEXUAL")] Sexual = 4
Contains references to sexual acts or other lewd content.
[EnumMember(Value = "HARM_CATEGORY_SEXUALLY_EXPLICIT")] SexuallyExplicit = 9
Sexually explicit content.
[EnumMember(Value = "HARM_CATEGORY_TOXICITY")] Toxicity = 2
Content that is rude, disrespectful, or profane.
[EnumMember(Value = "HARM_CATEGORY_UNSPECIFIED")] Unspecified = 0
Category is unspecified.
[EnumMember(Value = "HARM_CATEGORY_VIOLENCE")] Violence = 3
Describes scenarios depicting violence against an individual or group, or general descriptions of gore.
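Example
The EnumMember attributes map each member to its HARM_CATEGORY_* wire value. The minimal sketch below assumes the enum is serialized with Json.NET's StringEnumConverter (which honors EnumMemberAttribute); the exact serialization setup used by the package may differ.

```csharp
using Newtonsoft.Json;
using Newtonsoft.Json.Converters;
using Uralstech.UGemini.Models.Generation.Safety;

public static class SafetyHarmCategoryExample
{
    public static void Main()
    {
        // Serializing the enum with StringEnumConverter (assumed here) emits
        // the HARM_CATEGORY_* string from the EnumMember attribute instead of
        // the numeric value, matching the Gemini API's expected wire format.
        string wireValue = JsonConvert.SerializeObject(
            GeminiSafetyHarmCategory.HateSpeech,
            new StringEnumConverter());

        System.Console.WriteLine(wireValue); // "HARM_CATEGORY_HATE_SPEECH"
    }
}
```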