End of training
    	
.gitattributes CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+trainer_state.json filter=lfs diff=lfs merge=lfs -text
    	
README.md CHANGED

@@ -2,6 +2,7 @@
 license: apache-2.0
 base_model: google/vit-base-patch16-224
 tags:
+- masked-auto-encoding
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -17,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
+- Loss: 0.3360
 
 ## Model description
 
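The masked-auto-encoding tag suggests this checkpoint was produced with transformers' ViTMAE pre-training setup. Below is a minimal loading sketch under that assumption; the repo id and image path are placeholders, not values taken from this commit.

```python
# Hedged sketch: loading the checkpoint for masked-image-modeling inference.
# Assumes the repo follows transformers' ViTMAE pre-training layout, as the
# "masked-auto-encoding" tag suggests; "your-username/your-model" and
# "example.jpg" are placeholders.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ViTMAEForPreTraining

repo_id = "your-username/your-model"  # placeholder, not the actual repo id
processor = AutoImageProcessor.from_pretrained(repo_id)
model = ViTMAEForPreTraining.from_pretrained(repo_id)

image = Image.open("example.jpg").convert("RGB")  # any RGB image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# The reconstruction loss is the same quantity as the "Loss" reported above.
print(float(outputs.loss))
```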
    	
all_results.json ADDED

@@ -0,0 +1,11 @@
+{
+    "epoch": 800.0,
+    "eval_loss": 0.3360130190849304,
+    "eval_runtime": 63.6007,
+    "eval_samples_per_second": 79.543,
+    "eval_steps_per_second": 9.953,
+    "train_loss": 0.36668021675006923,
+    "train_runtime": 517141.5399,
+    "train_samples_per_second": 44.339,
+    "train_steps_per_second": 5.543
+}
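These metrics come straight from the Trainer: the runtimes are in seconds, so the reported train_runtime of 517141.5399 s works out to roughly six days of training. A minimal sketch of reading the file back, assuming it sits in the current directory:

```python
# Quick sanity check on the reported metrics (keys as written in all_results.json).
import json

with open("all_results.json") as f:
    results = json.load(f)

# train_runtime is in seconds; 517141.5399 s is roughly 6 days.
print(f"training took {results['train_runtime'] / 86400:.2f} days")
print(f"eval loss: {results['eval_loss']:.4f}")  # 0.3360, matching the README
```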
    	
eval_results.json ADDED

@@ -0,0 +1,7 @@
+{
+    "epoch": 800.0,
+    "eval_loss": 0.3360130190849304,
+    "eval_runtime": 63.6007,
+    "eval_samples_per_second": 79.543,
+    "eval_steps_per_second": 9.953
+}
    	
runs/Mar19_21-52-01_ip-172-16-71-114.us-west-2.compute.internal/events.out.tfevents.1711402369.ip-172-16-71-114.us-west-2.compute.internal.1165.1 ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a38040fbb685135a3165611e85ba1ed7713c5c953fe32dc197348318aea5cc8
+size 369
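The event file is committed as a Git LFS pointer (the three lines above); once the 369-byte object itself is fetched, it can be inspected with TensorBoard's EventAccumulator. A sketch under that assumption, with a placeholder local path:

```python
# Hedged sketch: reading logged scalars from the downloaded TensorBoard event
# file. The local path is a placeholder for wherever the LFS object was pulled.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Mar19_21-52-01_.../events.out.tfevents...")  # placeholder path
acc.Reload()

# Print every scalar tag that was logged (e.g. the evaluation loss) per step.
for tag in acc.Tags()["scalars"]:
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
```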
    	
train_results.json ADDED

@@ -0,0 +1,7 @@
+{
+    "epoch": 800.0,
+    "train_loss": 0.36668021675006923,
+    "train_runtime": 517141.5399,
+    "train_samples_per_second": 44.339,
+    "train_steps_per_second": 5.543
+}
    	
trainer_state.json ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15003b4e8b1213332e03ff373c3a6117ea30db6a0a4c05e4d09f6285cb6c8551
+size 47275597
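trainer_state.json is likewise stored as an LFS pointer (about 47 MB once resolved), which is why this commit also adds an LFS rule for it in .gitattributes. One way to fetch and read it is via huggingface_hub; the repo id below is a placeholder, and the keys accessed are the usual Trainer state fields.

```python
# Hedged sketch: resolving the LFS-tracked trainer_state.json and reading the
# Trainer's log history. "your-username/your-model" is a placeholder repo id.
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="your-username/your-model",  # placeholder
                       filename="trainer_state.json")

with open(path) as f:
    state = json.load(f)

# trainer_state.json holds the per-step log history written during training.
print(state["epoch"], len(state["log_history"]))
```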