rahulnair23 ingelise committed on
Commit
3aaebfe
·
verified ·
1 Parent(s): dde8af2

iv_json_updater (#5)

Browse files

- chore: update model version (c2a4f327b767c6423b0e1a9aa597a4e3ae7bd5eb)
- fix: clear content before processing new usecase (c3beeca949fc714bef6184511ea829f586f2c7c3)


Co-authored-by: Inge V <ingelise@users.noreply.huggingface.co>

Files changed (2) hide show
  1. app.py +40 -30
  2. executor.py +16 -4
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
  import gradio as gr
3
  import logging
4
- from executor import risk_identifier, mitigations
5
 
6
  logging.basicConfig(level=logging.INFO)
7
  logger = logging.getLogger(__name__)
@@ -47,15 +47,15 @@ class UI:
47
  interactive=True,
48
  )
49
  self.model_name_or_path = gr.Dropdown(
50
- #choices=['codellama/codellama-34b-instruct-hf', 'google/flan-t5-xl', 'google/flan-t5-xxl', 'google/flan-ul2', 'ibm/granite-13b-instruct-v2', 'ibm/granite-20b-code-instruct', 'ibm/granite-20b-multilingual', 'ibm/granite-3-2-8b-instruct-preview-rc', 'ibm/granite-3-2b-instruct', 'ibm/granite-3-8b-instruct', 'ibm/granite-34b-code-instruct', 'ibm/granite-3b-code-instruct', 'ibm/granite-8b-code-instruct', 'ibm/granite-guardian-3-2b', 'ibm/granite-guardian-3-8b', 'meta-llama/llama-2-13b-chat', 'meta-llama/llama-3-1-70b-instruct', 'meta-llama/llama-3-1-8b-instruct', 'meta-llama/llama-3-2-11b-vision-instruct', 'meta-llama/llama-3-2-1b-instruct', 'meta-llama/llama-3-2-3b-instruct', 'meta-llama/llama-3-2-90b-vision-instruct', 'meta-llama/llama-3-3-70b-instruct', 'meta-llama/llama-3-405b-instruct', 'meta-llama/llama-guard-3-11b-vision', 'mistralai/mistral-large', 'mistralai/mixtral-8x7b-instruct-v01'],
51
- choices=["ibm/granite-20b-code-instruct"],
52
- value="ibm/granite-20b-code-instruct",
53
  multiselect=False,
54
  label="Choose language model to use",
55
  info="Language model used to assess risks (This is not the model being assessed).",
56
  interactive=True
57
  )
58
- examples = gr.Examples([["A medical chatbot for a triage system that assesses patients' symptoms and provides immediate, contextually relevant advice based on their medical history and current condition. The chatbot uses generative AI to analyze the patient's input, identify potential medical issues, and offer tailored recommendations or insights to the patient or healthcare provider.", "ibm-risk-atlas"],
59
  ["Building a customer support agent that automatically triages common problems with services.", "ibm-risk-atlas"]],
60
  [self.usecase, self.taxonomy],
61
  label='Example use cases', example_labels=["Medical chatbot", "Customer service agent"]
@@ -68,29 +68,31 @@ class UI:
68
  self.assessed_risks = gr.Dataset(elem_classes="risks", label=None, visible=False)
69
  self.assessed_risk_definition = gr.Markdown()
70
 
71
- gr.Markdown(
72
- """<h2> Related Risks </h2>
73
- Select a potential risk above to check for related risks.
74
- """
75
- )
76
- rrtb = gr.Markdown()
77
- self.relatedrisks = gr.Dataset(elem_classes="related-risks", components=[rrtb], label=None, visible=False)
78
-
79
- gr.Markdown(
80
- """<h2> Mitigations </h2>
81
- Select a potential risk to determine possible mitigations. """
82
- )
83
- self.mitigations_text = gr.Markdown()
84
- self.mitigations = gr.DataFrame(label=None, visible=False)
85
-
86
- gr.Markdown(
87
- """<h2>Benchmarks </h2>
88
- Select a potential risk to determine possible AI evaluations. """
89
- )
90
- self.benchmarks_text = gr.Markdown()
91
- self.benchmarks = gr.DataFrame(label=None, visible=False)
92
-
93
- self.download = gr.DownloadButton("Download JSON", visible=False)
 
 
94
 
95
  gr.Markdown("---")
96
  gr.Markdown("<br>")
@@ -107,12 +109,16 @@ class UI:
107
 
108
  self.header_block()
109
  self.risks = gr.State()
110
-
111
  # Risk assessment based on user intents
112
  self.risk_extraction()
113
 
114
- # Register event listeners
115
  self.risk_execute.click(
 
 
 
 
116
  fn=risk_identifier,
117
  inputs=[
118
  self.usecase,
@@ -124,6 +130,10 @@ class UI:
124
  )
125
 
126
  self.assessed_risks.select(
 
 
 
 
127
  fn=mitigations,
128
  inputs=[self.assessed_risks, self.taxonomy],
129
  # NOTETOSELF: Intent based risk is stored in self.risk (if needed)
 
1
  import os
2
  import gradio as gr
3
  import logging
4
+ from executor import clear_previous_risks, clear_previous_mitigations, risk_identifier, mitigations
5
 
6
  logging.basicConfig(level=logging.INFO)
7
  logger = logging.getLogger(__name__)
 
47
  interactive=True,
48
  )
49
  self.model_name_or_path = gr.Dropdown(
50
+ #choices=['codellama/codellama-34b-instruct-hf', 'google/flan-t5-xl', 'google/flan-t5-xxl', 'google/flan-ul2', 'ibm/granite-13b-instruct-v2', 'ibm/granite-3-3-8b-instruct', 'ibm/granite-20b-multilingual', 'ibm/granite-3-2-8b-instruct-preview-rc', 'ibm/granite-3-2b-instruct', 'ibm/granite-3-8b-instruct', 'ibm/granite-34b-code-instruct', 'ibm/granite-3b-code-instruct', 'ibm/granite-8b-code-instruct', 'ibm/granite-guardian-3-2b', 'ibm/granite-guardian-3-8b', 'meta-llama/llama-2-13b-chat', 'meta-llama/llama-3-1-70b-instruct', 'meta-llama/llama-3-1-8b-instruct', 'meta-llama/llama-3-2-11b-vision-instruct', 'meta-llama/llama-3-2-1b-instruct', 'meta-llama/llama-3-2-3b-instruct', 'meta-llama/llama-3-2-90b-vision-instruct', 'meta-llama/llama-3-3-70b-instruct', 'meta-llama/llama-3-405b-instruct', 'meta-llama/llama-guard-3-11b-vision', 'mistralai/mistral-large', 'mistralai/mixtral-8x7b-instruct-v01'],
51
+ choices=["ibm/granite-3-3-8b-instruct"],
52
+ value="ibm/granite-3-3-8b-instruct",
53
  multiselect=False,
54
  label="Choose language model to use",
55
  info="Language model used to assess risks (This is not the model being assessed).",
56
  interactive=True
57
  )
58
+ examples = gr.Examples([["A medical chatbot for a triage system that assesses patient symptoms and provides advice based on their medical history and current condition. The chatbot uses generative AI to analyze the patient input, identify potential medical issues, and offer tailored recommendations or insights to the patient or healthcare provider.", "ibm-risk-atlas"],
59
  ["Building a customer support agent that automatically triages common problems with services.", "ibm-risk-atlas"]],
60
  [self.usecase, self.taxonomy],
61
  label='Example use cases', example_labels=["Medical chatbot", "Customer service agent"]
 
68
  self.assessed_risks = gr.Dataset(elem_classes="risks", label=None, visible=False)
69
  self.assessed_risk_definition = gr.Markdown()
70
 
71
+ if len(self.assessed_risks.elem_classes ) > 0:
72
+ gr.Markdown(
73
+ """<h2> Related Risks </h2>
74
+ Select a potential risk above to check for related risks.
75
+ """
76
+ )
77
+ rrtb = gr.Markdown()
78
+ self.relatedrisks = gr.Dataset(elem_classes="related-risks", components=[rrtb], label=None, visible=False)
79
+
80
+
81
+ gr.Markdown(
82
+ """<h2> Mitigations </h2>
83
+ Select a potential risk to determine possible mitigations. """
84
+ )
85
+ self.mitigations_text = gr.Markdown()
86
+ self.mitigations = gr.DataFrame(label=None, visible=False)
87
+
88
+ gr.Markdown(
89
+ """<h2>Benchmarks </h2>
90
+ Select a potential risk to determine possible AI evaluations. """
91
+ )
92
+ self.benchmarks_text = gr.Markdown()
93
+ self.benchmarks = gr.DataFrame(label=None, visible=False)
94
+
95
+ self.download = gr.DownloadButton("Download JSON", visible=False)
96
 
97
  gr.Markdown("---")
98
  gr.Markdown("<br>")
 
109
 
110
  self.header_block()
111
  self.risks = gr.State()
112
+
113
  # Risk assessment based on user intents
114
  self.risk_extraction()
115
 
116
+ # Register event listener
117
  self.risk_execute.click(
118
+ fn=clear_previous_risks,
119
+ inputs=[],
120
+ outputs=[self.assessment_sec, self.risks, self.assessed_risks, self.download, self.assessed_risk_definition, self.relatedrisks, self.mitigations, self.benchmarks, self.mitigations_text],
121
+ ).then(
122
  fn=risk_identifier,
123
  inputs=[
124
  self.usecase,
 
130
  )
131
 
132
  self.assessed_risks.select(
133
+ fn=clear_previous_mitigations,
134
+ inputs=[],
135
+ outputs=[self.assessed_risk_definition, self.relatedrisks, self.mitigations, self.benchmarks, self.mitigations_text]
136
+ ).then(
137
  fn=mitigations,
138
  inputs=[self.assessed_risks, self.taxonomy],
139
  # NOTETOSELF: Intent based risk is stored in self.risk (if needed)
executor.py CHANGED
@@ -24,11 +24,20 @@ from functools import lru_cache
24
  ran = RiskAtlasNexus() # type: ignore
25
 
26
 
 
 
 
 
 
 
 
 
27
  @lru_cache
28
  def risk_identifier(usecase: str,
29
- model_name_or_path: str = "ibm/granite-20b-code-instruct",
30
  taxonomy: str = "ibm-risk-atlas"): # -> List[Dict[str, Any]]: #pd.DataFrame:
31
 
 
32
  inference_engine = WMLInferenceEngine(
33
  model_name_or_path= model_name_or_path,
34
  credentials={
@@ -61,12 +70,15 @@ def risk_identifier(usecase: str,
61
  'risks': [json.loads(r.json()) for r in risks]
62
  }
63
  file_path = Path("static/download.json")
64
- file_path.write_text(json.dumps(data, indent=4), encoding='utf-8')
65
-
 
 
 
66
  #return out_df
67
  return out_sec, gr.State(risks), gr.Dataset(samples=[r.id for r in risks],
68
  sample_labels=sample_labels,
69
- samples_per_page=50, visible=True, label="Estimated by an LLM."), gr.DownloadButton("Download JSON", visible=True, value="static/download.json")
70
 
71
 
72
  @lru_cache
 
24
  ran = RiskAtlasNexus() # type: ignore
25
 
26
 
27
+ def clear_previous_risks():
28
+ return gr.Markdown("""<h2> Potential Risks </h2> """), [], gr.Dataset(samples=[],
29
+ sample_labels=[],
30
+ samples_per_page=50, visible=False), gr.DownloadButton("Download JSON", visible=False, ), "", gr.Dataset(samples=[], sample_labels=[], visible=False), gr.DataFrame([], wrap=True, show_copy_button=True, show_search="search", visible=False), gr.DataFrame([], wrap=True, show_copy_button=True, show_search="search", visible=False), gr.Markdown(" ")
31
+
32
+ def clear_previous_mitigations():
33
+ return "", gr.Dataset(samples=[], sample_labels=[], visible=False), gr.DataFrame([], wrap=True, show_copy_button=True, show_search="search", visible=False), gr.DataFrame([], wrap=True, show_copy_button=True, show_search="search", visible=False), gr.Markdown(" ")
34
+
35
  @lru_cache
36
  def risk_identifier(usecase: str,
37
+ model_name_or_path: str = "ibm/granite-3-3-8b-instruct",
38
  taxonomy: str = "ibm-risk-atlas"): # -> List[Dict[str, Any]]: #pd.DataFrame:
39
 
40
+ downloadable = False
41
  inference_engine = WMLInferenceEngine(
42
  model_name_or_path= model_name_or_path,
43
  credentials={
 
70
  'risks': [json.loads(r.json()) for r in risks]
71
  }
72
  file_path = Path("static/download.json")
73
+ with open(file_path, mode='w') as f:
74
+ f.write(json.dumps(data, indent=4))
75
+ downloadable = True
76
+
77
+
78
  #return out_df
79
  return out_sec, gr.State(risks), gr.Dataset(samples=[r.id for r in risks],
80
  sample_labels=sample_labels,
81
+ samples_per_page=50, visible=True, label="Estimated by an LLM."), gr.DownloadButton("Download JSON", "static/download.json", visible=(downloadable and len(risks) > 0))
82
 
83
 
84
  @lru_cache