
Posts posted by fredreload

  1. I work with weather data sometimes.

    So my working files are pretty nearly random numbers.

    Actually, I've worked out another equation; I'm gonna test it out later tonight:

    x-y=w

    y-x=z

    w=-z

     

    How do you solve for x and y, Strange?

    Yeah, it doesn't seem to work either (see the quick check below).

     

    P.S. All this program does is split things into parts
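
    A quick numeric check of why this system is underdetermined (my own example, not from the thread): take x = 9, y = 5, which gives w = 4 and z = -4; but x = 104, y = 100 gives exactly the same w and z. Since w = -z always holds, the third line adds no new information, so infinitely many (x, y) pairs share the same w and z, and the pair cannot be recovered from them alone.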

  2. Nah, not on random numbers, it needs to be a working file lol. Anyway, this is the ultra-fast version. Not much compression gets done in one iteration, really. I'll work on it more tomorrow. (A quick sanity check of the sum/difference recovery this version relies on follows the code.)

     

    using System;
    using System.Collections.Generic;
    using System.Linq;

    namespace HelloWorld
    {
    class Hello
    {
    public static byte[] bytes = System.IO.File.ReadAllBytes(@"E:\data\Blank.gif");

    public static ulong[] array = new ulong[(bytes.Length / 14) + (bytes.Length % 14)];
    public static ulong[] arraya = new ulong[(bytes.Length / 14) + (bytes.Length % 14)];

    public static bool[] gtlt = new bool[(bytes.Length / 14) + (bytes.Length % 14)];

    public static byte[] result = new byte[bytes.Length];

    static void Main()
    {
    Console.WriteLine("Compressing");

    compress_test();

    Console.WriteLine("Decompressing");

    decompress_test();

    // Keep the console window open in debug mode.
    Console.WriteLine("Press any key to exit.");
    Console.ReadKey();
    }

    static byte IteratedBitcount(ulong n)
    {
    ulong test = n;
    int count = 0;

    while (test != 0)
    {
    if ((test & 1) == 1)
    {
    count++;
    }
    test >>= 1;
    }
    return Convert.ToByte(count);
    }

    protected static void decompress_test()
    {
    int l = 0;

    for (int i = 0; i < array.Length; i++)
    {
    ulong vals = 0;
    ulong valb = 0;

    ulong k = array[i];   // difference of the two 7-byte halves
    ulong o = arraya[i];  // sum of the two 7-byte halves

    // Recover the larger (valb) and smaller (vals) halves from sum and difference.
    valb = (o + k) / 2;
    vals = (o - k) / 2;

    if (!gtlt[i])
    {

    if (!((l + 13) >= bytes.Length))
    {
    result[l + 13] = (byte)(valb >> 48);
    }
    if (!((l + 12) >= bytes.Length))
    {
    result[l + 12] = (byte)(valb >> 40);
    }
    if (!((l + 11) >= bytes.Length))
    {
    result[l + 11] = (byte)(valb >> 32);
    }
    if (!((l + 10) >= bytes.Length))
    {
    result[l + 10] = (byte)(valb >> 24);
    }
    if (!((l + 9) >= bytes.Length))
    {
    result[l + 9] = (byte)(valb >> 16);
    }
    if (!((l + 8) >= bytes.Length))
    {
    result[l + 8] = (byte)(valb >> 8);
    }
    if (!((l + 7) >= bytes.Length))
    {
    result[l + 7] = (byte)valb;
    }

    if (!((l + 6) >= bytes.Length))
    {
    result[l + 6] = (byte)(vals >> 48);
    }
    if (!((l + 5) >= bytes.Length))
    {
    result[l + 5] = (byte)(vals >> 40);
    }
    if (!((l + 4) >= bytes.Length))
    {
    result[l + 4] = (byte)(vals >> 32);
    }
    if (!((l + 3) >= bytes.Length))
    {
    result[l + 3] = (byte)(vals >> 24);
    }
    if (!((l + 2) >= bytes.Length))
    {
    result[l + 2] = (byte)(vals >> 16);
    }
    if (!((l + 1) >= bytes.Length))
    {
    result[l + 1] = (byte)(vals >> 8);
    }
    if (!((l + 0) >= bytes.Length))
    {
    result[l + 0] = (byte)vals;
    }
    }
    else
    {
    if (!((l + 13) >= bytes.Length))
    {
    result[l + 13] = (byte)(vals >> 48);
    }
    if (!((l + 12) >= bytes.Length))
    {
    result[l + 12] = (byte)(vals >> 40);
    }
    if (!((l + 11) >= bytes.Length))
    {
    result[l + 11] = (byte)(vals >> 32);
    }
    if (!((l + 10) >= bytes.Length))
    {
    result[l + 10] = (byte)(vals >> 24);
    }
    if (!((l + 9) >= bytes.Length))
    {
    result[l + 9] = (byte)(vals >> 16);
    }
    if (!((l + 8) >= bytes.Length))
    {
    result[l + 8] = (byte)(vals >> 8);
    }
    if (!((l + 7) >= bytes.Length))
    {
    result[l + 7] = (byte)vals;
    }

    if (!((l + 6) >= bytes.Length))
    {
    result[l + 6] = (byte)(valb >> 48);
    }
    if (!((l + 5) >= bytes.Length))
    {
    result[l + 5] = (byte)(valb >> 40);
    }
    if (!((l + 4) >= bytes.Length))
    {
    result[l + 4] = (byte)(valb >> 32);
    }
    if (!((l + 3) >= bytes.Length))
    {
    result[l + 3] = (byte)(valb >> 24);
    }
    if (!((l + 2) >= bytes.Length))
    {
    result[l + 2] = (byte)(valb >> 16);
    }
    if (!((l + 1) >= bytes.Length))
    {
    result[l + 1] = (byte)(valb >> 8);
    }
    if (!((l + 0) >= bytes.Length))
    {
    result[l + 0] = (byte)valb;
    }
    }


    Console.WriteLine(l);
    l = l + 14;
    }

    System.IO.File.WriteAllBytes(@"E:\data\kBlan.gif", result);
    }

    protected static void compress_test()
    {
    byte[] array81 = new byte[8];
    byte[] array82 = new byte[8];

    for (int i = 0; i < bytes.Length; i = i + 14)
    {
    int count = 0;
    for (int j = 0; j < 8; j++)
    {
    if (i + j >= bytes.Length || i+j+7>=bytes.Length || j==7)
    {
    array81[j] = 0;
    array82[j] = 0;
    }
    else
    {
    array81[j] = bytes[i + count];
    array82[j] = bytes[i + count + 7];
    }
    count++;
    }
    // first 7 bytes of the block (array81 holds bytes i .. i+6)
    ulong val1 = BitConverter.ToUInt64(array81, 0);
    // next 7 bytes of the block (array82 holds bytes i+7 .. i+13)
    ulong val2 = BitConverter.ToUInt64(array82, 0);

    ulong diff;

    if (array81[6] > array82[6])
    {
    diff = (ulong)(val1 - val2);
    gtlt[i / 14] = true;
    array[i / 14] = diff;

    /*
    byte[] result = new byte[8];

    bytes[0] = (byte)(diff >> 56);
    bytes[1] = (byte)(diff >> 48);
    bytes[2] = (byte)(diff >> 40);
    bytes[3] = (byte)(diff >> 32);
    bytes[4] = (byte)(diff >> 24);
    bytes[5] = (byte)(diff >> 16);
    bytes[6] = (byte)(diff >> 8);
    bytes[7] = (byte)diff;
    */
    }
    else
    {
    diff = (ulong)(val2 - val1);
    gtlt[i / 14] = false;
    array[i / 14] = diff;
    }
    ulong add = (ulong)(val1 + val2);
    arraya[i / 14] = add;

    }
    }
    }
    }
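
    A minimal standalone sketch of the identity this version's decompress_test relies on (my own demo with made-up values, not part of the original post): if you keep the sum s = a + b and the difference d = a - b of two values with a >= b, then a = (s + d) / 2 and b = (s - d) / 2 recover the pair exactly, as long as the sum fits the storage type.

    using System;

    class SumDiffDemo
    {
        static void Main()
        {
            // Hypothetical 7-byte-sized values, like the two halves of one 14-byte block above.
            ulong a = 0x00AABBCCDDEEFF11UL;  // larger half (fits in 56 bits)
            ulong b = 0x0011223344556677UL;  // smaller half

            ulong s = a + b;  // what compress_test stores in arraya
            ulong d = a - b;  // what compress_test stores in array

            // The recovery step used at the top of decompress_test.
            ulong big = (s + d) / 2;
            ulong small = (s - d) / 2;

            Console.WriteLine(big == a && small == b);  // True
        }
    }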

  3. LOL from the guy who first told him, that's funny.

    I thought it'd work for sure = =, guess I was too confident.

     

    P.S. Wait, it works; the trick is to store the plus (sum) side as well. I'll try to get the algorithm going again.

    Here's the complete code and the image. (A note on the cost of the pair search follows the code.)

     

    using System;
    using System.Collections.Generic;
    using System.Linq;

    namespace HelloWorld
    {
    class Hello
    {
    public static byte[] bytes = System.IO.File.ReadAllBytes(@"E:\data\Blank.gif");

    // Per 4-byte block: the difference of its two 16-bit halves (array), their sum (arraya),
    // the bit counts of the larger and smaller half (c1, c2), and which half was larger (gtlt).
    public static ushort[] array = new ushort[(bytes.Length / 4) + (bytes.Length % 4)];
    public static ushort[] arraya = new ushort[(bytes.Length / 4) + (bytes.Length % 4)];

    public static byte[] c1 = new byte[(bytes.Length / 4) + (bytes.Length % 4)];
    public static byte[] c2 = new byte[(bytes.Length / 4) + (bytes.Length % 4)];

    public static byte[] result = new byte[bytes.Length];

    public static bool[] gtlt = new bool[(bytes.Length / 4) + (bytes.Length % 4)];

    static void Main()
    {
    Console.WriteLine("Compressing");

    compress_test();

    Console.WriteLine("Decompressing");

    decompress_test();

    // Keep the console window open in debug mode.
    Console.WriteLine("Press any key to exit.");
    Console.ReadKey();
    }

    // Counts set bits three at a time using a packed lookup table.
    static int CountBits(int value)
    {
    value <<= 1;
    int cnt = 0;
    while (value != 0)
    {
    cnt += (0xe994 >> (value & 14)) & 3;
    value >>= 3;
    }
    return cnt;
    }

    // Enumerates every value of width 'bits' that has exactly 'bitsSet' bits set.
    static IEnumerable<int> PermutateBits(int bits, int bitsSet)
    {
    int min = 0x7fffffff >> (31 - bitsSet);
    int max = min << (bits - bitsSet);
    for (int i = min; i <= max; i++)
    {
    if (CountBits(i) == bitsSet)
    {
    yield return i;
    }
    }
    }

    static string Bin(int value, int len)
    {
    return (len > 1 ? Bin(value >> 1, len - 1) : null) + "01"[value & 1];
    }

    // Counts the set bits of n one bit at a time.
    static byte IteratedBitcount(ushort n)
    {
    ushort test = n;
    int count = 0;

    while (test != 0)
    {
    if ((test & 1) == 1)
    {
    count++;
    }
    test >>= 1;
    }
    return Convert.ToByte(count);
    }

    protected static void decompress_test()
    {
    int l = 0;

    for (int i = 0; i < array.Length; i++)
    {
    ushort vals = 0;
    ushort valb = 0;

    if (c1[i] == 0 && c2[i] == 0)
    {
    // The two halves were equal; array[i] holds the value itself.
    vals = array[i];
    valb = array[i];
    }
    else
    {
    ushort k = array[i];   // difference of the two halves
    ushort o = arraya[i];  // sum of the two halves

    bool check = false;

    // Brute-force search: try every pair of 16-bit values with the stored
    // bit counts until one matches both the difference and the sum.
    foreach (int m in PermutateBits(16, c1[i]))
    {
    foreach (int n in PermutateBits(16, c2[i]))
    {
    valb = Convert.ToUInt16(Bin(m, 16), 2);
    vals = Convert.ToUInt16(Bin(n, 16), 2);

    if (((ushort)(valb - vals) == k) && ((ushort)(valb + vals) == o))
    {
    check = true;
    break;
    }
    }
    if (check)
    {
    break;
    }
    }

    }
    if (gtlt[i])
    {
    if (!((l + 3) >= bytes.Length))
    {
    result[l + 3] = (byte)(valb >> 8);
    }
    if (!((l + 2) >= bytes.Length))
    {
    result[l + 2] = (byte)valb;
    }
    if (!((l + 1) >= bytes.Length))
    {
    result[l + 1] = (byte)(vals >> 8);
    }
    if (!((l + 0) >= bytes.Length))
    {
    result[l + 0] = (byte)vals;
    }
    }
    else
    {
    if (!((l + 3) >= bytes.Length))
    {
    result[l + 3] = (byte)(vals >> 8);
    }
    if (!((l + 2) >= bytes.Length))
    {
    result[l + 2] = (byte)vals;
    }
    if (!((l + 1) >= bytes.Length))
    {
    result[l + 1] = (byte)(valb >> 8);
    }
    if (!((l + 0) >= bytes.Length))
    {
    result[l + 0] = (byte)valb;
    }
    }

    Console.WriteLine(l);
    l = l + 4;
    }

    System.IO.File.WriteAllBytes(@"E:\data\kBlan.gif", result);
    }

    protected static void compress_test()
    {
    byte[] array16 = new byte[4];

    for (int i = 0; i < bytes.Length; i = i + 4)
    {
    for (int j = 0; j < 4; j++)
    {
    if (i + j >= bytes.Length)
    {
    array16[j] = 0;
    }
    else
    {
    array16[j] = bytes[i + j];
    }
    }
    // bytes 3~2 of the block
    ushort val1 = BitConverter.ToUInt16(array16, 2);
    // bytes 1~0 of the block
    ushort val2 = BitConverter.ToUInt16(array16, 0);

    ushort diff;

    if (array16[3] > array16[1])
    {
    diff = (ushort)(val1 - val2);
    gtlt[i / 4] = true;
    array[i / 4] = diff;

    c1[i / 4] = IteratedBitcount(val1);
    c2[i / 4] = IteratedBitcount(val2);
    }
    else
    {
    diff = (ushort)(val2 - val1);
    gtlt[i / 4] = false;
    array[i / 4] = diff;

    c1[i / 4] = IteratedBitcount(val2);
    c2[i / 4] = IteratedBitcount(val1);
    }

    // Equal halves: store the value itself and flag it with zero bit counts.
    if (diff == 0)
    {
    c1[i / 4] = 0;
    c2[i / 4] = 0;
    array[i / 4] = val1;
    arraya[i / 4] = val1;
    }

    ushort add = (ushort)(val1 + val2);
    arraya[i / 4] = add;

    }
    }
    }
    }
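
    Worth noting about the search above (my own back-of-the-envelope estimate, not from the original post): PermutateBits(16, k) yields "16 choose k" values, which peaks at 12,870 for k = 8, so the nested loops in decompress_test can test up to about 12,870 × 12,870 ≈ 1.7 × 10^8 candidate pairs for a single 4-byte block before finding a match. That is why this version is so much slower than the sum/difference one above.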

  4. I'm dealing with a huge binary permutation; the safest bet is to stick with 16 bits. Well, I need a way to generate all the binary permutations. For instance, a 4-bit binary with two 1 bits would be 0011, 0101, 0110, 1001, etc. If you have working code it would be cool (there's a sketch after this post), else I'll try to Google more tomorrow.

     

    P.S. Yeah, there are multiple possible answers for this algorithm = =, it failed; I guess I'll have to try other methods.
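
    Since the post above asks for working code, here is a minimal sketch of one way to enumerate them (my own code, not from the thread; FixedPopcount and its parameters are made-up names, and it assumes the width is at most 31 bits so the values fit in an int). It builds the values by choosing which bit positions are set, so it never scans and rejects candidates the way PermutateBits above does.

    using System;
    using System.Collections.Generic;

    class BitPermutations
    {
        // Yields every 'width'-bit value with exactly 'ones' bits set, in increasing order.
        static IEnumerable<int> FixedPopcount(int width, int ones)
        {
            if (ones == 0)
            {
                yield return 0;
                yield break;
            }
            // Put the highest chosen bit at position p, then place the remaining ones below it.
            for (int p = ones - 1; p < width; p++)
            {
                foreach (int rest in FixedPopcount(p, ones - 1))
                {
                    yield return (1 << p) | rest;
                }
            }
        }

        static void Main()
        {
            // The 4-bit, two-ones example from the post:
            // prints 0011, 0101, 0110, 1001, 1010, 1100.
            foreach (int v in FixedPopcount(4, 2))
            {
                Console.WriteLine(Convert.ToString(v, 2).PadLeft(4, '0'));
            }
        }
    }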

  5. So I wrote the compression test based on the math I presented in the Mathematics section. It successfully reduces the size of the image by half with only one iteration. c1 and c2 take around 1,889+ bytes; you are free to test it on any file. I'll work on the decompression some other time. (A note on the bit-counting helper follows the code.)

     

    static byte IteratedBitcount(ulong n)
    {
    ulong test = n;
    int count = 0;
    while (test != 0)
    {
    if ((test & 1) == 1)
    {
    count++;
    }
    test >>= 1;
    }
    return Convert.ToByte(count);
    }

     

    protected void compress_test()
    {
    byte[] bytes = System.IO.File.ReadAllBytes(@"C:\Python2712\Scripts\compression_test\Result.jpg");
    ulong[] array = new ulong[(bytes.Length / 16)+(bytes.Length % 16)];
    byte[] c1 = new byte[(bytes.Length / 16) + (bytes.Length % 16)];
    byte[] c2 = new byte[(bytes.Length / 16) + (bytes.Length % 16)];
    bool[] gtlt = new bool[(bytes.Length / 16) + (bytes.Length % 16)];
    byte[] array16 = new byte[16];
    for( int i=0; i< bytes.Length; i=i+16)
    {
    for (int j = 0; j <16; j++)
    {
    if (i + j >= bytes.Length)
    {
    array16[j] = 0;
    }
    else
    {
    array16[j] = bytes[i + j];
    }
    }
    //15~8
    ulong val1 = BitConverter.ToUInt64(array16, 8);
    //7~0
    ulong val2 = BitConverter.ToUInt64(array16, 0);
    ulong diff;
    if (array16[15] > array16[7])
    {
    diff = val1 - val2;
    gtlt[i/16] = true;
    array[i/16] = diff;
    /*
    byte[] bytess = new byte[8];
    bytes[0] = (byte)(diff >> 56);
    bytes[1] = (byte)(diff >> 48);
    bytes[2] = (byte)(diff >> 40);
    bytes[3] = (byte)(diff >> 32);
    bytes[4] = (byte)(diff >> 24);
    bytes[5] = (byte)(diff >> 16);
    bytes[6] = (byte)(diff >> 8);
    bytes[7] = (byte)diff;
    */
    }
    else
    {
    diff = val2 - val1;
    gtlt[i/16] = false;
    array[i/16] = diff;
    /*
    byte[] bytess = new byte[8];
    bytes[0] = (byte)(diff >> 56);
    bytes[1] = (byte)(diff >> 48);
    bytes[2] = (byte)(diff >> 40);
    bytes[3] = (byte)(diff >> 32);
    bytes[4] = (byte)(diff >> 24);
    bytes[5] = (byte)(diff >> 16);
    bytes[6] = (byte)(diff >> 8);
    bytes[7] = (byte)diff;
    */
    }
    c1[i / 16] = IteratedBitcount(val1);
    c2[i / 16] = IteratedBitcount(val2);
    }
    }
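
    A side note on the IteratedBitcount helper above (my own snippet, not from the thread; it assumes .NET Core 3.0 or later, where System.Numerics.BitOperations is available): the framework's built-in popcount returns the same value and uses a hardware instruction when one exists, so it could replace the loop.

    using System;
    using System.Numerics;

    class PopcountCheck
    {
        // Same bit-at-a-time count as in the post above.
        static byte IteratedBitcount(ulong n)
        {
            ulong test = n;
            int count = 0;
            while (test != 0)
            {
                if ((test & 1) == 1)
                {
                    count++;
                }
                test >>= 1;
            }
            return Convert.ToByte(count);
        }

        static void Main()
        {
            ulong sample = 0xDEADBEEFCAFEBABE;  // arbitrary test value
            Console.WriteLine(IteratedBitcount(sample) == BitOperations.PopCount(sample));  // True
        }
    }
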
  6. It's called non-photorealistic rendering

    https://en.wikipedia.org/wiki/Non-photorealistic_rendering

     

    An example of it is cel shading:

    https://en.wikipedia.org/wiki/Cel_shading

     

     

    The computer used has nothing to do with the effects (as long as it is fast enough (Macs can have 6 cores) and a 3D application with plugins is available for it).

    Cel shading is pretty fast, definitely much faster to render than photorealistic rendering.

    Even a single-core, 20-year-old obsolete CPU can handle it (you would have to use an older version of the 3D app with plugins; modern ones probably require SSE2/SSE3).

    A studio making a movie for TV/cinema should have enough money to hire a programmer to make a proprietary cel-shading plugin, with the features required by the director and producer.

    I'm thinking about the physics involved in just creating 3D scenes that act like 2D: the particle effects, the visuals, the different angles of lighting. All that calls for tons of programming time and computing power without making it seem rigid. But can an anime character really exist in 3D? That is something up for debate, lol. Cel shading looks interesting; I programmed the flat shading technique back in college, and the two have different visual effects. The idea of an implemented lighting hack makes everything look plastic, though, forgot where that's from.

     

    P.S. I think you are on the right track with cel shading

  7.  

     

    Hi, I am not an engineer. I am a mathematician and my studies are closely related to mathematical engineering.

    I could not understand your question well, presumably because I do not have enough knowledge of the relevant engineering.

    but;

    AS EXTERNAL INSTRUCTION

    I would remind you that we cannot always write this equation: A+B = C

    for instance, if A is an element of R3 and B is an element of R4, then we will not be able to write this equation..

    but in all probability ajb's knowledge on this subject is higher than mine; I just know this cannot always be written.

    if he has enough time, maybe he can help you with your question.

     

    blue

    Hi blue, my solution for compression is actually in the A-B=C thread; you're free to check my math and come up with a better notation. I came up with it on a whim so it could be wrong. Thanks for your time.

     

    Most compression algorithms use bits that aren't normally used. For instance, let's say my computer can read 64 bits, which is

     

    0000000000000000000000000000000000000000000000000000000000000000

     

    Then let's say that I want to store numbers; I can store a digit with 4 bits.

     

    0 = 0000

    1 = 0001

    2 = 0010

    3 = 0011

    4 = 0100

    5 = 0101

    6 = 0110

    7 = 0111

    8 = 1000

    9 = 1001

     

    Now let's say my computer stores the number 19 in 64-bit; it could be stored something like this (it probably isn't).

     

    00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000001001

     

    The same number in 4-bit digits is

     

    00011001

     

    We can see the excessive use of space here; a bad compression algorithm could use 64/4 or 16 numbers

     

    0000,0000,0000,0000,0000,0000,0000,0000,0000,0000,0000,0000,0000,0000,0000,0001

     

    however a good compression algorithm could have 2^64/16 or 1152921504606846976 numbers.

     

    Decompression was generally handled by the CPU until HD televisions came out; then GPU-accelerated video decoding became more of a thing.

    Hi fiveworlds, I came up with a solution in the A-B=C thread, but I haven't found a way to reverse A and B from a circuit. My idea about the compression is pretty much A+B=C. Let's say you have a 128-bit pattern like the one you have: you split it in half, left and right, so you have 64 bits on the left (that is your A) and 64 bits on the right (that is your B), and the end result C is going to have 64 bits. With this you've reduced the 128 bits of A and B down to the 64 bits of C only. And it can be recursive, meaning you take C as 64 bits and split it into 32 bits on the left (your A) and 32 bits on the right (your B). Eventually you reduce all 128 bits into a single bit of either 0 or 1. My other thread of A-B=C requires you to store the bit-count information of A and B, which takes around 6 bits (2^6 = 64) for A and another 6 bits for B. You add these count bits on the way down, which is 6+5+4+3+2+1 = 21 bits total from 128 bits of information (one halving step is sketched after this post). You can also reduce these 21 bits of count keys to a smaller value as well, but the work gets a bit tedious at this point. I would like to start with 256 bits, essentially double 128 bits, and it can be reduced down to 7+6+5+4+3+2+1 = 28 bits. But again, managing this 256-bit information would be kind of tedious as well, since it is outside the scope of ordinary integer storage, so, well, I'd love to hear from you guys.
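
    To make one halving step concrete, here is a minimal sketch of the scheme described above at a smaller scale (my own illustration with made-up values and hypothetical names; it assumes .NET Core 3.0+ for BitOperations.PopCount). It splits a 64-bit block into a 32-bit left half A and right half B and keeps C = A + B plus the bit counts of A and B, which is the information the description says gets stored.

    using System;
    using System.Numerics;

    class HalvingStep
    {
        static void Main()
        {
            ulong block = 0xDEADBEEF0BADF00D;      // one 64-bit block of input

            uint a = (uint)(block >> 32);          // left half  (A)
            uint b = (uint)block;                  // right half (B)

            ulong c = (ulong)a + b;                // C = A + B (note the add can carry into a 33rd bit)
            int bitsA = BitOperations.PopCount(a); // bit-count info stored for A
            int bitsB = BitOperations.PopCount(b); // bit-count info stored for B

            Console.WriteLine($"A={a:X8} B={b:X8} C={c:X} popcount(A)={bitsA} popcount(B)={bitsB}");
        }
    }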

  8. My bad, guys. I should mention that if you want to back-solve the circuit, you eventually need all of Borin, Borout and D to back-solve A and B; if Borout is missing it won't work either.

  9. Modifying your DNA, especially as an adult rather than in utero, is far more likely to give you cancer than it is to repair your organs. I think I'll pass.

    Cancer is just an error in manipulating DNA; if you fully understood the technology to control DNA, then you could technically fix any cancer. Speculating, of course.

    How about a 5th answer: by viruses?

    That's the most typical way DNA is changed/modified on a daily basis.

    A virus attacking a cell replaces/modifies the DNA, introducing its own.

    So one can create a virus which will replace your own DNA with your own DNA plus some modifications.

    Instead of fighting with viruses/bacteria, one could replace one's own DNA, in all cells, with DNA carrying modifications that stop these viruses/bacteria from attacking you.

    Of course.

    That should be the aim of every intelligent person.

    That's what believers pray for in churches: to live after death.

    So why not live "forever" before death, so that death never happens?

    Again, I hear the cry of "I would be bored to death by such an endless life"... WTF?!

    Do you really REALLY have nothing to do?

    I look at these old people, and plenty of them are dead already: just eating and drinking and breathing, but their minds are dead.

    They just wait for death, thinking about it all day long. That's really sad. So many opportunities in this world and Universe, and they WANT to die. That's fucking unbelievable, how stupid humans can be.

    Albert Einstein said "Two things are infinite: the universe and human stupidity; and I'm not sure about the universe".

    Here I can perfectly agree.

    Hmm, one thing about viruses is that they can't insert DNA at the correct place compared to CRISPR, and accuracy is important. If there is a way to control where a virus inserts the DNA, I'd love to hear it :P

  10. Well, it's easy to bypass this exception, since you only need one set of values, A or B: set both bit counts to zero (they might not actually be zero, but this signals that A and B are the same) and set C to either A or B.

  11. Right, well, before you think I'm crazy: I'm speculating on how DNA could be modified in the future, hopefully within the next 25 years, give or take. I set up a poll and you are welcome to vote on it.

     

    1. CRISPR/Cas9 is a well-known method that uses a bacterial enzyme to cut and modify DNA. There have been successful modifications of animal embryos, but it can be error-prone. It uses a molecular scissor to cut the DNA, and after that you swap in the desired DNA sequence.

     

    2. The nanomachine method is all theoretical; scientists are working on building nanomachines at this scale, not to mention a method of mass production. Let's imagine each nanomachine contains a wireless antenna that sends a signal to your computer. You can track its position and control when and where you want to make the cut at the click of a button; imaging the nanomachine can be done with laser scanning, and you provide the nanomachine with the correct power source and a mechanical scissor able to cut your DNA. You could have it carry the DNA sequence for you and swap in the sequence like planting a tree, speculating. It is recyclable.

     

    3. A laser light capable of breaking the DNA's molecular bonds at the desired area. Have you ever played fruit-chopping games on a cell phone? Kind of like that, except you are slicing DNA with a laser. You again get to image the DNA sequence with laser scanning technology. You make a precise cut at the right location and use optical tweezers to swap the DNA sequence into the correct location after you've made the cut. It sounds tedious at first, but once you've got all the lasers programmed to beam at the right places, you would cover your entire body with the laser light; then with one button click the DNA could be swapped. It could be quite convenient.

     

    Well, as much as science has had many breakthroughs in this area, currently we are at stage one, and there is always a chance that an error exists and cannot be controlled. Once it enters the body, it's all on its own. That makes the second and third techniques more flexible. I'd imagine number three is probably easier to construct than number two, but both of these methods are quite desirable. With laser scanning technology they could be working at 100% accuracy. I'm not discouraging method one either, since many applications are still being tested in this case, but I'll let you guys decide on this one. My pick would be number 3.

  12. Alright, so I think I solved this algorithm. I call it the shrink ray; I got the idea from Minions. Pretty much for any A-B in binary, you can reverse-engineer A and B from C alone, knowing how many 1's and 0's there are in A and B.

     

    For example:

     

    1010 A (two 1's, two 0's)

    -0101 B (two 1's, two 0's)

    --------

    0101 C

     

    Note: I've only tested the condition for A>B, not negative numbers

    Now you reverse-engineer A and B from C: you know C is 5 and A and B are bigger, so you can have A=6 B=1, A=7 B=2, A=8 B=3, A=9 B=4, A=10 B=5, A=11 B=6, A=12 B=7, A=13 B=8, A=14 B=9, A=15 B=10. With all these candidates available, the only pair that holds true with A having two 1's and B having two 1's is A=10 and B=5. So that's the solution. I think you can reverse-engineer any A and B from C and the bit information on A and B.

     

    If A and B are 32 bits each, it only takes 6 bits each to store the bit-count information, since the largest possible count is 32, so I think it's pretty good in terms of shrinking the size down. Check the math for me, will ya, Strange? (A quick brute-force check of the 4-bit example is sketched below.)
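
    A brute-force check of the 4-bit worked example above (my own code with made-up names; it assumes .NET Core 3.0+ for BitOperations.PopCount): given C = A - B plus the 1-counts of A and B, it scans every candidate pair with A > B and prints the ones that satisfy all three stored facts. For C = 5 with both counts equal to 2 it prints only A=1010, B=0101, matching the example; nothing here guarantees the match stays unique for wider inputs, though.

    using System;
    using System.Numerics;

    class ShrinkRayCheck
    {
        static void Main()
        {
            int c = 5;      // C = A - B from the example (0101)
            int onesA = 2;  // stored count of 1's in A
            int onesB = 2;  // stored count of 1's in B

            for (int a = 0; a < 16; a++)
            {
                for (int b = 0; b < a; b++)
                {
                    if (a - b == c
                        && BitOperations.PopCount((uint)a) == onesA
                        && BitOperations.PopCount((uint)b) == onesB)
                    {
                        Console.WriteLine("A=" + Convert.ToString(a, 2).PadLeft(4, '0') +
                                          " B=" + Convert.ToString(b, 2).PadLeft(4, '0'));
                    }
                }
            }
            // Prints only: A=1010 B=0101
        }
    }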
