This commit is contained in:
Marco Realacci 2024-11-02 16:28:37 +01:00
parent be7844b4f3
commit eea09ec9b8
15 changed files with 35749 additions and 63 deletions

View file

@ -5,5 +5,6 @@
"mathlive-in-editor-mode",
"smart-second-brain",
"local-gpt",
"obsidian-latex-suite"
"obsidian-latex-suite",
"companion"
]

19
.obsidian/plugins/companion/data.json vendored Normal file
View file

@ -0,0 +1,19 @@
{
"provider": "ollama",
"model": "qwen2.5:14b",
"enable_by_default": false,
"keybind": "Tab",
"delay_ms": 2000,
"stream": true,
"accept": {
"splitter_regex": " ",
"display_splitter_regex": "[.?!:;]",
"completion_completeness_regex": ".*(?!\\p{L})[^\\d]$",
"min_accept_length": 4,
"min_display_length": 50,
"retrigger_threshold": 48
},
"provider_settings": {},
"presets": [],
"fallback": null
}

35514
.obsidian/plugins/companion/main.js vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,11 @@
{
"id": "companion",
"name": "Companion",
"version": "1.8.0",
"minAppVersion": "1.1.16",
"description": "Autocomplete with AI, including ChatGPT and ollama, through a copilot-like interface.",
"author": "rizerphe",
"authorUrl": "https://github.com/rizerphe",
"fundingUrl": "https://www.buymeacoffee.com/rizerphe",
"isDesktopOnly": false
}

25
.obsidian/plugins/companion/styles.css vendored Normal file
View file

@ -0,0 +1,25 @@
/* src/styles.sass */
.ai-complete-advanced-settings {
padding-left: 1rem;
}
.ai-complete-chatgpt-full-width {
width: 100%;
min-height: 120px;
resize: none;
}
.ai-complete-jurassic-expandable {
display: flex;
flex-direction: row;
align-items: center;
gap: 0.5rem;
}
.ai-complete-ollama-full-width {
width: 100%;
min-height: 120px;
resize: none;
}
.ai-complete-groq-full-width {
width: 100%;
min-height: 120px;
resize: none;
}

View file

@ -2,7 +2,7 @@
"providers": {
"ollama": {
"ollamaUrl": "http://localhost:11434",
"defaultModel": "llama3.1:8b-instruct-q8_0",
"defaultModel": "qwen2.5:14b",
"embeddingModel": "nomic-embed-text",
"type": "ollama"
},
@ -28,7 +28,7 @@
"defaults": {
"provider": "ollama",
"fallbackProvider": "",
"creativity": "low"
"creativity": ""
},
"actions": [
{

View file

@ -5,10 +5,10 @@
"initialAssistantMessageContent": "Hello, I am your assistant. How can I help you?",
"isIncognitoMode": true,
"ollamaGenModel": {
"model": "llama3.1:8b-instruct-q8_0",
"model": "qwen2.5:14b",
"baseUrl": "http://localhost:11434",
"temperature": 0.5,
"contextWindow": 1948,
"contextWindow": 2048,
"lcModel": {
"lc": 1,
"type": "constructor",
@ -22,14 +22,126 @@
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1948
"context_window": 1248,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1348,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 1,
"context_window": 1448,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0,
"context_window": 1548,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0,
"context_window": 1648,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1748,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1848,
"lc_model": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"ollama",
"ChatOllama"
],
"kwargs": {
"model": "llama3.1:8b-instruct-q8_0",
"base_url": "http://localhost:11434",
"temperature": 0.5,
"context_window": 1948
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
},
"ollamaEmbedModel": {
"model": "nomic-embed-text",
"baseUrl": "http://localhost:11434",
"similarityThreshold": 0.75,
"similarityThreshold": 0.5,
"k": 100
},
"openAIGenModel": {
@ -49,7 +161,7 @@
"Chats",
"*.excalidraw.md"
],
"isQuickSettingsOpen": true,
"isQuickSettingsOpen": false,
"isVerbose": false,
"isOnboarded": true,
"hideIncognitoWarning": false,

View file

@ -4,21 +4,21 @@
"type": "split",
"children": [
{
"id": "5db0591e19cf632e",
"id": "d43d10180ff70873",
"type": "tabs",
"children": [
{
"id": "5af0211cef5316b5",
"id": "86cc34a714fe2576",
"type": "leaf",
"state": {
"type": "markdown",
"state": {
"file": "Autonomous Networking/notes/7 RL.md",
"file": "Autonomous Networking/notes/9 Markov processes.md",
"mode": "source",
"source": false
},
"icon": "lucide-file",
"title": "7 RL"
"title": "9 Markov processes"
}
}
]
@ -168,11 +168,12 @@
}
}
],
"currentTab": 4
"currentTab": 5
}
],
"direction": "horizontal",
"width": 604.5
"width": 604.5,
"collapsed": true
},
"left-ribbon": {
"hiddenItems": {
@ -187,11 +188,27 @@
"pdf-plus:PDF++: Toggle auto-focus": false,
"pdf-plus:PDF++: Toggle auto-paste": false,
"obsidian-git:Open Git source control": false,
"smart-second-brain:Open S2B Chat": false
"smart-second-brain:Open S2B Chat": false,
"companion:Toggle completion": false
}
},
"active": "0d5325c0f9289cea",
"active": "86cc34a714fe2576",
"lastOpenFiles": [
"Autonomous Networking/slides/6 IoT.pdf",
"Autonomous Networking/slides/9markovprocess.pdf",
"Autonomous Networking/slides/AutonomousNet-Class13-2122-Optimal_policy_and_Qlearning.pdf",
"Autonomous Networking/slides/AutonomousNet-Class11-2122-Performance_of_action_selection_methods_UCB.pdf",
"Autonomous Networking/notes/9 Markov processes.md",
"Autonomous Networking/slides/7 RL1.pdf",
"Autonomous Networking/notes/7.2 10 arm testbed - optimism in face of uncertainty.md",
"Autonomous Networking/notes/7.1 K-Armed bandit problem.md",
"Autonomous Networking/notes/2 RFID.md",
"Foundation of data science/notes/2 Logistic Regression.md",
"Foundation of data science/notes/3 Multi Class Binary Classification.md",
"Chats/New Chat.md",
"Foundation of data science/slides/Untitled.md",
"Senza nome.md",
"Autonomous Networking/images/Pasted image 20241031144640.png",
"Autonomous Networking/notes/7 RL.md",
"conflict-files-obsidian-git.md",
"Biometric Systems/slides/LEZIONE6_Face recognition2D.pdf",
@ -207,36 +224,22 @@
"Biometric Systems/images/Pasted image 20241031085606.png",
"Biometric Systems/images/Pasted image 20241031084659.png",
"Biometric Systems/images/Pasted image 20241030165705.png",
"Biometric Systems/images/Pasted image 20241030154413.png",
"Autonomous Networking/slides/AutonomousNet-Class13-2122-Optimal_policy_and_Qlearning.pdf",
"Autonomous Networking/slides/AutonomousNet-Class10-2122-Multiarmed_bandit.pdf",
"Autonomous Networking/slides/9markovprocess.pdf",
"Autonomous Networking/notes/9 Markov processes.md",
"Autonomous Networking/notes/7.2 10 arm testbed - optimism in face of uncertainty.md",
"Autonomous Networking/notes/7.1 K-Armed bandit problem.md",
"Autonomous Networking/notes/8.md",
"Autonomous Networking/notes/q&a.md",
"Autonomous Networking/notes/4 WSN Routing.md",
"Autonomous Networking/notes/5 Drones.md",
"Autonomous Networking/notes/2 RFID.md",
"Foundation of data science/notes/2 Logistic Regression.md",
"Foundation of data science/notes/3 Multi Class Binary Classification.md",
"Chats/New Chat.md",
"Foundation of data science/notes/1 CV Basics.md",
"Foundation of data science/images",
"Autonomous Networking/slides/AutonomousNet-Class11-2122-Performance_of_action_selection_methods_UCB.pdf",
"Chats",
"Autonomous Networking/slides/5 Drones.pdf",
"Autonomous Networking/slides/6 IoT.pdf",
"Autonomous Networking/notes/3 WSN MAC.md",
"Autonomous Networking/notes/6 Internet of Things.md",
"Autonomous Networking/notes/6.1 RL.md",
"Biometric Systems/notes/4. Face recognition.md",
"Foundation of data science/slides/Untitled.md",
"Biometric Systems/notes/3. Recognition Reliability.md",
"Foundation of data science/slides/notes 2.md",
"Biometric Systems/notes/2. Performance indexes.md",
"Biometric Systems/notes/1. Introduction.md",
"Senza nome.canvas"
]
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

View file

@ -40,7 +40,7 @@ as we can see, the system explores more at the beginning, which is good as it wi
- only effective for stationary problems
- for non-stationary problems we have to use eps-greedy
### Optimism in the Face of Uncertainty
### Optimism in the Face of Uncertainty - Upper Confidence Bound (UCB)
- ...
- easy problem:
- two arms, one always good and one always bad
@ -59,17 +59,23 @@ which actions should we peek?
![[Pasted image 20241025090344.png]]
the brackets represent a confidence interval around q*(a). The system is confident that the value lies somewhere in the region.
The problem is that, when a region is large, we don't know where the average value is! So we have to try!
If region is very small, we are very certain!
![[Pasted image 20241025090549.png]]
In this situation we choose Q2, as its estimated value is the highest.
![[Pasted image 20241031144640.png]]
But in this case Q1.
#### Action selection
![[Pasted image 20241025090625.png]]
... check slides for formula explanation ...
- We will select the action that has the highest estimated value plus the upper-confidence bound exploration term
- The c parameter is a user-specified parameter that controls the amount of exploration
- $N_{t}(a)$ is the number of times an action $a$ has been taken
- to systematically reduce uncertainty, UCB explores more at the beginning
- UCB's exploration reduces over time, eps-greedy continues to take a random action 10% of the time
### AI generated summary
In the end, we can say that UCB is an effective strategy for balancing exploration and exploitation in multi-armed bandit problems. Unlike ε-greedy methods, which maintain a fixed level of exploration throughout the process, UCB dynamically adjusts its exploration rate based on the uncertainty associated with each action's value estimates. This adaptability makes UCB particularly well-suited for scenarios where initial exploration is crucial to quickly identify high-reward actions but later iterations require more focused exploitation.

View file

@ -57,7 +57,7 @@ This is a Markov Process but we also have a reward function! We also have a disc
Value function
- The value function v(s) gives the long-term value of (being in) state s
- The state value function v(s) of an MRP is the expected return starting from state s $𝑉) = 𝔼 [𝐺𝑡 |𝑆𝑡 = 𝑠]$
- The state value function v(s) of an MRP is the expected return starting from state s $$v(s) = \mathbb{E}[G_t \mid S_t = s]$$
![[Pasted image 20241030103519.png]]
![[Pasted image 20241030103706.png]]
@ -111,8 +111,8 @@ The state-value function v𝜋(s) of an MDP is the expected return starting from
The action-value function $q_{\pi}(s,a)$ is the expected return starting from state s, taking action a, and then following policy $\pi$ $$q_{\pi}(s,a) = \mathbb{E}_{\pi}[G_t \mid S_t = s, A_t = a]$$
![[Pasted image 20241030105022.png]]
- The state-value function can again be decomposed into immediate reward plus discounted value of successor state $$v\pi(s) = E\pi[Rt+1 + v⇡(St+1) | St = s]$$
- The action-value function can similarly be decomposed $$q\pi(s, a) = E\pi [Rt+1 + q⇡(St+1, At+1) | St = s, At = a]$$
- The state-value function can again be decomposed into immediate reward plus discounted value of successor state $$v_{\pi}(s) = E_{\pi}[R_{t+1} + v_{\pi}(S_{t+1}) | St = s]$$
- The action-value function can similarly be decomposed $$q_{\pi}(s, a) = E_{\pi}[R_{t+1} + q_{\pi}(S_{t+1}, A_{t+1}) | St = s, At = a]$$
![[Pasted image 20241030105148.png]]![[Pasted image 20241030105207.png]]
![[Pasted image 20241030105216.png]]
putting all together

View file

@ -2,44 +2,39 @@ Assistant
Hello, I am your assistant. How can I help you?
- - - - -
User
what are UAVs?
Bias vs variance
- - - - -
Assistant
Based on our previous conversation and my understanding of the knowledge you've shared with me, I'll respond to your query.
**Bias vs Variance**
=====================
**UAVs (Unmanned Aerial Vehicles)**
The bias-variance tradeoff is a fundamental concept in machine learning that refers to the balance between two types of errors: **bias** and **variance**.
UAVs, also known as drones or unmanned aircraft systems (UAS), are [[Remote Controlled Aircraft|remote controlled aircraft]] that operate without a human pilot on board. They can be used for various purposes, including:
### Bias
* Surveillance and reconnaissance
* Military operations
* Agricultural monitoring
* Environmental monitoring
* Aerial photography and videography
* Package delivery
* The error caused by **bias** measures how far off the predictions of a model are from the true value. It represents the difference between the predicted mean of the model and the correct value.
* A high bias indicates that the model is underfitting, meaning it's too simple to capture the underlying patterns in the data.
UAVs typically consist of an airframe, engine, propellers, sensors, and a control system. Some UAVs are controlled remotely by a human pilot, while others can fly autonomously using pre-programmed flight plans or GPS navigation.
### Variance
If you'd like to know more about UAVs, feel free to ask!
* The error due to **variance** measures how much the predictions of a model vary for a given point. It reflects the impact of randomness in the data on the prediction results.
* A high variance indicates that the model is overfitting, meaning it's too complex and fits the noise in the training data.
(Note: I couldn't find any specific notes on UAVs in your existing knowledge, so I've provided general information. If you have any additional notes or details, please let me know!)
- - - - -
User
latex absolute value
- - - - -
Assistant
It seems that there is no relevant information about "latex absolute value" in your existing knowledge. However, based on my understanding of LaTeX, I can provide you with a brief explanation.
### Diagrammatic Representation
**Absolute Value in LaTeX**
The bias-variance tradeoff can be visualized using a **target diagram**, where the center represents a perfect model that predicts the true values.
In LaTeX, the absolute value of an expression is denoted by the `\lvert` command for left absolute value and `\rvert` command for right absolute value. The `|` symbol can also be used to represent absolute value.
### Mathematical Definition
For example:
The prediction error at point x can be decomposed into bias, variance, and irreducible error:
* `\lvert x \rvert` represents the absolute value of `x`
* `|x|` is an alternative way to represent absolute value
$$\text{Err}(x) = \left(E[\hat{f}(x)] - f(x)\right)^2 + E\left[(\hat{f}(x) - E[\hat{f}(x)])^2\right] + \sigma^2_\epsilon$$
If you need more information or have specific questions about LaTeX, feel free to ask!
### Related Concepts
(Note: I've assumed that your knowledge on LaTeX is limited, so I've provided basic information. If you have any additional notes or details, please let me know!)
* **Optimism in the Face of Uncertainty - Upper Confidence Bound (UCB)**: Discusses balancing exploration and exploitation in a multi-armed bandit setting, related to bias-variance tradeoff.
* **Gaussian Filtering**: Describes linear filtering using a Gaussian kernel, which can smooth out noise and reduce variance.
* **Logistic Regression**: Introduces maximum likelihood estimation (MLE) for logistic regression, related to bias-variance tradeoff in model selection.
These concepts can be used to discuss the bias-variance tradeoff in more detail.
- - - - -

View file

@ -45,7 +45,7 @@ Calcoliamo la derivata parziale della funzione LL:
![[Pasted image 20241025165130.png]]
Però dobbiamo calcolarlo per tutti i sample ovviamente, per cui:
![[Pasted image 20241025165317.png]]
Poi aggiorniamo il valore così:
![[Pasted image 20241025165411.png]]
il parametro $\eta$ indica il rate di apprendimento, come nel gradient descent della linear regression. Lo scegliamo noi, se è troppo grande l'algoritmo non convergerà mai, se è troppo piccolo ci metterà un fottio, ma sarà più preciso.

0
Senza nome.md Normal file
View file